2024-11-22 03:46:03,613 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-22 03:46:03,631 main DEBUG Took 0.015330 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-22 03:46:03,632 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-22 03:46:03,632 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-22 03:46:03,634 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-22 03:46:03,636 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:46:03,646 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-22 03:46:03,661 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:46:03,663 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:46:03,664 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:46:03,664 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:46:03,665 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:46:03,666 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:46:03,667 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:46:03,668 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:46:03,669 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:46:03,669 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:46:03,670 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:46:03,670 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:46:03,671 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:46:03,672 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-22 03:46:03,672 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:46:03,673 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:46:03,673 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:46:03,674 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:46:03,675 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:46:03,675 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:46:03,675 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:46:03,676 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:46:03,677 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:46:03,677 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 03:46:03,678 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:46:03,678 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-22 03:46:03,680 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 03:46:03,682 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-22 03:46:03,684 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-22 03:46:03,685 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-22 03:46:03,687 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-22 03:46:03,687 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-22 03:46:03,697 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-22 03:46:03,699 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-22 03:46:03,701 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-22 03:46:03,702 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-22 03:46:03,702 main DEBUG createAppenders(={Console}) 2024-11-22 03:46:03,703 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-22 03:46:03,704 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-22 03:46:03,704 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-22 03:46:03,705 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-22 03:46:03,705 main DEBUG OutputStream closed 2024-11-22 03:46:03,706 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-22 03:46:03,706 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-22 03:46:03,706 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-22 03:46:03,801 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-22 03:46:03,804 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-22 03:46:03,806 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-22 03:46:03,808 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-22 03:46:03,809 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-22 03:46:03,809 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-22 03:46:03,810 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-22 03:46:03,811 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-22 03:46:03,811 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-22 03:46:03,812 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-22 03:46:03,812 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-22 03:46:03,813 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-22 03:46:03,813 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-22 03:46:03,814 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-22 03:46:03,814 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-22 03:46:03,815 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-22 03:46:03,815 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-22 03:46:03,816 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-22 03:46:03,820 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-22 03:46:03,820 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-22 03:46:03,821 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-22 03:46:03,823 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-22T03:46:04,177 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57 2024-11-22 03:46:04,180 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-22 03:46:04,180 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-22T03:46:04,192 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-22T03:46:04,232 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=263, ProcessCount=11, AvailableMemoryMB=4858 2024-11-22T03:46:04,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T03:46:04,258 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/cluster_8a641521-ea10-f02c-ee52-80b3b3d82476, deleteOnExit=true 2024-11-22T03:46:04,259 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T03:46:04,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/test.cache.data in system properties and HBase conf 2024-11-22T03:46:04,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T03:46:04,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/hadoop.log.dir in system properties and HBase conf 2024-11-22T03:46:04,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T03:46:04,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T03:46:04,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T03:46:04,359 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-22T03:46:04,477 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T03:46:04,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:46:04,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:46:04,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T03:46:04,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:46:04,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T03:46:04,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T03:46:04,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:46:04,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:46:04,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T03:46:04,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/nfs.dump.dir in system properties and HBase conf 2024-11-22T03:46:04,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/java.io.tmpdir in system properties and HBase conf 2024-11-22T03:46:04,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:46:04,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T03:46:04,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T03:46:05,061 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:46:05,388 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-22T03:46:05,484 INFO [Time-limited test {}] log.Log(170): Logging initialized @2809ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-22T03:46:05,575 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:46:05,657 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:46:05,685 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:46:05,685 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:46:05,690 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:46:05,711 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:46:05,715 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:46:05,717 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:46:05,980 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6de997b9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/java.io.tmpdir/jetty-localhost-40119-hadoop-hdfs-3_4_1-tests_jar-_-any-14375551096902569611/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:46:05,988 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:40119} 2024-11-22T03:46:05,989 INFO [Time-limited test {}] server.Server(415): Started @3315ms 2024-11-22T03:46:06,039 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:46:06,499 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:46:06,509 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:46:06,514 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:46:06,514 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:46:06,515 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:46:06,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26c88bf4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:46:06,519 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@621a7cbc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:46:06,668 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@11e88411{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/java.io.tmpdir/jetty-localhost-34351-hadoop-hdfs-3_4_1-tests_jar-_-any-2466987038747049935/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:46:06,669 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75423500{HTTP/1.1, (http/1.1)}{localhost:34351} 2024-11-22T03:46:06,670 INFO [Time-limited test {}] server.Server(415): Started @3996ms 2024-11-22T03:46:06,730 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:46:06,877 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:46:06,885 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:46:06,889 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:46:06,890 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:46:06,890 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:46:06,893 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43794ae7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:46:06,894 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35f1cf70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:46:06,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@590b36b7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/java.io.tmpdir/jetty-localhost-33933-hadoop-hdfs-3_4_1-tests_jar-_-any-11027352404617115042/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:46:06,998 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@237fc06a{HTTP/1.1, (http/1.1)}{localhost:33933} 2024-11-22T03:46:06,998 INFO [Time-limited test {}] server.Server(415): Started @4325ms 2024-11-22T03:46:07,000 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-22T03:46:07,156 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/cluster_8a641521-ea10-f02c-ee52-80b3b3d82476/data/data4/current/BP-1632664268-172.17.0.2-1732247165158/current, will proceed with Du for space computation calculation, 2024-11-22T03:46:07,156 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/cluster_8a641521-ea10-f02c-ee52-80b3b3d82476/data/data1/current/BP-1632664268-172.17.0.2-1732247165158/current, will proceed with Du for space computation calculation, 2024-11-22T03:46:07,156 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/cluster_8a641521-ea10-f02c-ee52-80b3b3d82476/data/data3/current/BP-1632664268-172.17.0.2-1732247165158/current, will proceed with Du for space computation calculation, 2024-11-22T03:46:07,156 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/cluster_8a641521-ea10-f02c-ee52-80b3b3d82476/data/data2/current/BP-1632664268-172.17.0.2-1732247165158/current, will proceed with Du for space computation calculation, 2024-11-22T03:46:07,203 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:46:07,204 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:46:07,282 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x87387688ba67f16f with lease ID 0xc60229c3a1d3752c: Processing first storage report for DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a from datanode DatanodeRegistration(127.0.0.1:36195, datanodeUuid=ad4e545b-d304-428c-8a47-13f9db974967, infoPort=40077, infoSecurePort=0, ipcPort=40719, storageInfo=lv=-57;cid=testClusterID;nsid=82172230;c=1732247165158) 2024-11-22T03:46:07,283 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x87387688ba67f16f with lease ID 0xc60229c3a1d3752c: from storage DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a node DatanodeRegistration(127.0.0.1:36195, datanodeUuid=ad4e545b-d304-428c-8a47-13f9db974967, infoPort=40077, infoSecurePort=0, ipcPort=40719, storageInfo=lv=-57;cid=testClusterID;nsid=82172230;c=1732247165158), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-22T03:46:07,284 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd477700f4cbe3f01 with lease ID 0xc60229c3a1d3752d: Processing first storage report for DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2 from datanode DatanodeRegistration(127.0.0.1:40065, datanodeUuid=2d108a64-ec22-4881-9c2a-719e630f3460, infoPort=43557, infoSecurePort=0, ipcPort=44033, storageInfo=lv=-57;cid=testClusterID;nsid=82172230;c=1732247165158) 2024-11-22T03:46:07,284 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd477700f4cbe3f01 with lease ID 0xc60229c3a1d3752d: from storage DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2 node DatanodeRegistration(127.0.0.1:40065, datanodeUuid=2d108a64-ec22-4881-9c2a-719e630f3460, infoPort=43557, infoSecurePort=0, ipcPort=44033, storageInfo=lv=-57;cid=testClusterID;nsid=82172230;c=1732247165158), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:46:07,285 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x87387688ba67f16f with lease ID 0xc60229c3a1d3752c: Processing first storage report for DS-7c667cef-124b-4f3d-b1df-7e591a6ec9a2 from datanode DatanodeRegistration(127.0.0.1:36195, datanodeUuid=ad4e545b-d304-428c-8a47-13f9db974967, infoPort=40077, infoSecurePort=0, ipcPort=40719, storageInfo=lv=-57;cid=testClusterID;nsid=82172230;c=1732247165158) 2024-11-22T03:46:07,285 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x87387688ba67f16f with lease ID 0xc60229c3a1d3752c: from storage DS-7c667cef-124b-4f3d-b1df-7e591a6ec9a2 node DatanodeRegistration(127.0.0.1:36195, datanodeUuid=ad4e545b-d304-428c-8a47-13f9db974967, infoPort=40077, infoSecurePort=0, ipcPort=40719, storageInfo=lv=-57;cid=testClusterID;nsid=82172230;c=1732247165158), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T03:46:07,285 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd477700f4cbe3f01 with lease ID 0xc60229c3a1d3752d: Processing first storage report for DS-5af49768-f391-43b3-8986-3daf152e55c3 from datanode DatanodeRegistration(127.0.0.1:40065, datanodeUuid=2d108a64-ec22-4881-9c2a-719e630f3460, infoPort=43557, infoSecurePort=0, ipcPort=44033, storageInfo=lv=-57;cid=testClusterID;nsid=82172230;c=1732247165158) 2024-11-22T03:46:07,286 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd477700f4cbe3f01 
with lease ID 0xc60229c3a1d3752d: from storage DS-5af49768-f391-43b3-8986-3daf152e55c3 node DatanodeRegistration(127.0.0.1:40065, datanodeUuid=2d108a64-ec22-4881-9c2a-719e630f3460, infoPort=43557, infoSecurePort=0, ipcPort=44033, storageInfo=lv=-57;cid=testClusterID;nsid=82172230;c=1732247165158), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:46:07,337 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57 2024-11-22T03:46:07,441 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/cluster_8a641521-ea10-f02c-ee52-80b3b3d82476/zookeeper_0, clientPort=61678, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/cluster_8a641521-ea10-f02c-ee52-80b3b3d82476/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/cluster_8a641521-ea10-f02c-ee52-80b3b3d82476/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T03:46:07,455 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61678 2024-11-22T03:46:07,469 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:46:07,473 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:46:07,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:46:07,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:46:08,168 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984 with version=8 2024-11-22T03:46:08,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/hbase-staging 2024-11-22T03:46:08,265 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-22T03:46:08,502 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c85114ed5096:0 server-side Connection retries=45 2024-11-22T03:46:08,512 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:46:08,513 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, 
maxQueueLength=30, handlerCount=3 2024-11-22T03:46:08,517 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:46:08,517 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:46:08,517 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:46:08,679 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T03:46:08,754 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-22T03:46:08,767 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-22T03:46:08,772 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:46:08,807 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 66145 (auto-detected) 2024-11-22T03:46:08,809 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-22T03:46:08,840 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40393 2024-11-22T03:46:08,863 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40393 connecting to ZooKeeper ensemble=127.0.0.1:61678 2024-11-22T03:46:08,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:403930x0, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:46:08,901 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40393-0x10065897e270000 connected 2024-11-22T03:46:08,939 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:46:08,941 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:46:08,956 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:46:08,959 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984, hbase.cluster.distributed=false 2024-11-22T03:46:08,985 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:46:08,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40393 
2024-11-22T03:46:09,001 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40393 2024-11-22T03:46:09,005 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40393 2024-11-22T03:46:09,006 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40393 2024-11-22T03:46:09,007 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40393 2024-11-22T03:46:09,137 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c85114ed5096:0 server-side Connection retries=45 2024-11-22T03:46:09,140 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:46:09,140 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:46:09,141 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:46:09,141 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:46:09,141 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:46:09,144 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:46:09,147 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:46:09,149 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37629 2024-11-22T03:46:09,152 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37629 connecting to ZooKeeper ensemble=127.0.0.1:61678 2024-11-22T03:46:09,154 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:46:09,159 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:46:09,175 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:376290x0, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:46:09,176 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:376290x0, quorum=127.0.0.1:61678, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:46:09,176 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:37629-0x10065897e270001 connected 2024-11-22T03:46:09,181 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:46:09,191 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T03:46:09,194 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T03:46:09,203 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:46:09,205 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37629 2024-11-22T03:46:09,210 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37629 2024-11-22T03:46:09,213 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37629 2024-11-22T03:46:09,215 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37629 2024-11-22T03:46:09,216 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37629 2024-11-22T03:46:09,235 DEBUG [M:0;c85114ed5096:40393 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c85114ed5096:40393 2024-11-22T03:46:09,236 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c85114ed5096,40393,1732247168319 2024-11-22T03:46:09,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:46:09,243 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:46:09,245 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c85114ed5096,40393,1732247168319 2024-11-22T03:46:09,269 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T03:46:09,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:46:09,270 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:46:09,271 DEBUG 
[master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:46:09,272 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c85114ed5096,40393,1732247168319 from backup master directory 2024-11-22T03:46:09,275 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:46:09,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c85114ed5096,40393,1732247168319 2024-11-22T03:46:09,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:46:09,276 WARN [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:46:09,276 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c85114ed5096,40393,1732247168319 2024-11-22T03:46:09,278 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-22T03:46:09,280 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-22T03:46:09,354 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/hbase.id] with ID: bd1659ed-4749-44d5-9c29-9f9e27a12f70 2024-11-22T03:46:09,354 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/.tmp/hbase.id 2024-11-22T03:46:09,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:46:09,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:46:09,378 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/.tmp/hbase.id]:[hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/hbase.id] 2024-11-22T03:46:09,435 INFO [master/c85114ed5096:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:46:09,441 INFO [master/c85114ed5096:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-22T03:46:09,462 INFO [master/c85114ed5096:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 20ms. 2024-11-22T03:46:09,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:46:09,466 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:46:09,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:46:09,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:46:09,514 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:46:09,516 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T03:46:09,521 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:46:09,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:46:09,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:46:09,583 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store 2024-11-22T03:46:09,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:46:09,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:46:09,613 INFO [master/c85114ed5096:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-22T03:46:09,623 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:46:09,625 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:46:09,625 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:46:09,625 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:46:09,627 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:46:09,627 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:46:09,628 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:46:09,629 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732247169625Disabling compacts and flushes for region at 1732247169625Disabling writes for close at 1732247169627 (+2 ms)Writing region close event to WAL at 1732247169627Closed at 1732247169627 2024-11-22T03:46:09,632 WARN [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/.initializing 2024-11-22T03:46:09,632 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/WALs/c85114ed5096,40393,1732247168319 2024-11-22T03:46:09,658 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C40393%2C1732247168319, suffix=, logDir=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/WALs/c85114ed5096,40393,1732247168319, archiveDir=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/oldWALs, maxLogs=10 2024-11-22T03:46:09,670 INFO [master/c85114ed5096:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C40393%2C1732247168319.1732247169664 2024-11-22T03:46:09,697 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/WALs/c85114ed5096,40393,1732247168319/c85114ed5096%2C40393%2C1732247168319.1732247169664 2024-11-22T03:46:09,708 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43557:43557),(127.0.0.1/127.0.0.1:40077:40077)] 2024-11-22T03:46:09,709 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:46:09,710 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:46:09,712 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:46:09,713 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:46:09,758 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:46:09,783 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T03:46:09,788 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:46:09,791 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:46:09,791 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:46:09,795 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T03:46:09,795 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:46:09,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:46:09,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:46:09,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T03:46:09,800 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:46:09,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:46:09,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:46:09,806 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T03:46:09,806 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:46:09,808 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:46:09,808 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:46:09,813 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:46:09,814 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:46:09,821 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:46:09,821 DEBUG [master/c85114ed5096:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:46:09,825 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T03:46:09,828 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:46:09,834 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:46:09,836 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778171, jitterRate=-0.010505661368370056}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T03:46:09,847 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732247169728Initializing all the Stores at 1732247169731 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247169731Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247169732 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247169732Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247169733 (+1 ms)Cleaning up temporary data from old regions at 1732247169821 (+88 ms)Region opened successfully at 1732247169846 (+25 ms) 2024-11-22T03:46:09,848 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T03:46:09,885 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@743c299b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c85114ed5096/172.17.0.2:0 2024-11-22T03:46:09,922 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T03:46:09,935 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T03:46:09,935 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T03:46:09,938 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T03:46:09,939 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-22T03:46:09,948 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 7 msec 2024-11-22T03:46:09,948 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T03:46:09,981 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T03:46:09,989 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T03:46:09,990 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T03:46:09,993 INFO [master/c85114ed5096:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T03:46:09,994 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T03:46:09,996 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T03:46:09,998 INFO [master/c85114ed5096:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T03:46:10,006 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T03:46:10,008 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T03:46:10,010 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T03:46:10,011 
DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T03:46:10,033 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T03:46:10,035 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T03:46:10,038 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:46:10,038 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:46:10,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:46:10,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:46:10,041 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c85114ed5096,40393,1732247168319, sessionid=0x10065897e270000, setting cluster-up flag (Was=false) 2024-11-22T03:46:10,053 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:46:10,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:46:10,063 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T03:46:10,064 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c85114ed5096,40393,1732247168319 2024-11-22T03:46:10,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:46:10,070 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:46:10,074 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T03:46:10,075 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c85114ed5096,40393,1732247168319 2024-11-22T03:46:10,083 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T03:46:10,121 INFO [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(746): ClusterId : bd1659ed-4749-44d5-9c29-9f9e27a12f70 2024-11-22T03:46:10,123 DEBUG [RS:0;c85114ed5096:37629 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:46:10,127 DEBUG [RS:0;c85114ed5096:37629 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:46:10,128 DEBUG [RS:0;c85114ed5096:37629 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:46:10,130 DEBUG [RS:0;c85114ed5096:37629 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:46:10,131 DEBUG [RS:0;c85114ed5096:37629 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cb5a5db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c85114ed5096/172.17.0.2:0 2024-11-22T03:46:10,151 DEBUG [RS:0;c85114ed5096:37629 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c85114ed5096:37629 2024-11-22T03:46:10,154 INFO [RS:0;c85114ed5096:37629 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:46:10,154 INFO [RS:0;c85114ed5096:37629 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:46:10,154 DEBUG [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-22T03:46:10,157 INFO [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(2659): reportForDuty to master=c85114ed5096,40393,1732247168319 with port=37629, startcode=1732247169094 2024-11-22T03:46:10,170 DEBUG [RS:0;c85114ed5096:37629 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:46:10,173 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T03:46:10,182 INFO [master/c85114ed5096:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T03:46:10,193 INFO [master/c85114ed5096:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
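
The StochasticLoadBalancer entry above reports maxSteps=1000000, stepsPerRegion=800 and maxRunningTime=30000. A sketch of overriding those knobs, assuming the usual hbase.master.balancer.stochastic.* property names; the keys themselves are not printed in the log, so verify them against the HBase release in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Values mirror the ones logged above; the property names are assumed, not taken from the log.
            conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
        }
    }
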
2024-11-22T03:46:10,201 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c85114ed5096,40393,1732247168319 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T03:46:10,210 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:46:10,210 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:46:10,211 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:46:10,212 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:46:10,213 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c85114ed5096:0, corePoolSize=10, maxPoolSize=10 2024-11-22T03:46:10,213 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:46:10,213 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c85114ed5096:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:46:10,213 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:46:10,224 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732247200224 2024-11-22T03:46:10,226 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T03:46:10,228 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T03:46:10,230 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:46:10,231 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T03:46:10,233 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T03:46:10,233 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T03:46:10,234 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T03:46:10,234 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T03:46:10,240 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:46:10,240 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T03:46:10,251 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57233, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:46:10,248 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
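
The FSTableDescriptors entry above spells out the hbase:meta column families and their attributes (ROW_INDEX_V1 encoding, ROWCOL bloom filter, 8 KB block size, in-memory). A rough sketch of declaring a family with the same attributes through the client descriptor builders; this only illustrates the attributes shown in the log, not the code path the master actually runs, and 'demo:meta_like' is a made-up table name:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
        public static void main(String[] args) {
            // Mirrors the 'info' family attributes printed in the log above.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build();
            // Illustrative table name; only the descriptor is built here, nothing is created.
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo:meta_like"))
                .setColumnFamily(info)
                .build();
            System.out.println(td);
        }
    }
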
2024-11-22T03:46:10,254 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T03:46:10,257 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T03:46:10,257 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T03:46:10,261 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T03:46:10,262 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T03:46:10,260 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40393 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-22T03:46:10,266 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247170263,5,FailOnTimeoutGroup] 2024-11-22T03:46:10,267 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247170266,5,FailOnTimeoutGroup] 2024-11-22T03:46:10,267 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:10,267 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T03:46:10,269 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
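
Several entries above and below are ChoreService announcements of the form "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." A minimal sketch of what scheduling such a chore looks like with the public ScheduledChore/ChoreService API as found in recent HBase releases; SimpleStopper, the chore name and the one-second period are illustrative:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        // Minimal Stoppable so the chore can be constructed outside a real server.
        static class SimpleStopper implements Stoppable {
            private volatile boolean stopped;
            @Override public void stop(String why) { stopped = true; }
            @Override public boolean isStopped() { return stopped; }
        }

        public static void main(String[] args) throws InterruptedException {
            ChoreService service = new ChoreService("demo");
            ScheduledChore chore = new ScheduledChore("demo-chore", new SimpleStopper(), 1000) {
                @Override protected void chore() {
                    System.out.println("chore tick");
                }
            };
            service.scheduleChore(chore); // should announce the chore much like the entries above
            Thread.sleep(3000);
            service.shutdown();
        }
    }
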
2024-11-22T03:46:10,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:46:10,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:46:10,270 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:10,274 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T03:46:10,275 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984 2024-11-22T03:46:10,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:46:10,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:46:10,297 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:46:10,298 DEBUG [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-22T03:46:10,299 WARN [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
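
The two records just above show the register-with-master handshake failing with "Master is not running yet" and the region server sleeping 100 ms before retrying. The loop below is only a generic illustration of that sleep-and-retry pattern, not HBase's actual reportForDuty implementation; checkMasterReady is a stand-in for the RPC that threw ServerNotRunningYetException earlier:

    public class RetrySketch {
        // Stand-in for the regionServerStartup RPC rejected while the master was still starting.
        static boolean checkMasterReady(int attempt) {
            return attempt >= 3; // pretend the master becomes ready on the third try
        }

        public static void main(String[] args) throws InterruptedException {
            int attempt = 0;
            while (!checkMasterReady(++attempt)) {
                // Same spirit as "reportForDuty failed; sleeping 100 ms and then retrying."
                System.out.println("attempt " + attempt + " failed; sleeping 100 ms and retrying");
                Thread.sleep(100L);
            }
            System.out.println("registered after " + attempt + " attempts");
        }
    }
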
2024-11-22T03:46:10,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:46:10,305 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:46:10,305 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:46:10,306 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:46:10,306 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:46:10,309 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:46:10,309 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:46:10,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:46:10,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:46:10,315 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 
MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:46:10,315 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:46:10,316 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:46:10,316 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:46:10,319 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:46:10,319 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:46:10,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:46:10,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:46:10,322 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740 2024-11-22T03:46:10,323 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740 2024-11-22T03:46:10,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:46:10,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:46:10,327 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No 
hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:46:10,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:46:10,334 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:46:10,335 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=836156, jitterRate=0.06322817504405975}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:46:10,339 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732247170298Initializing all the Stores at 1732247170300 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247170300Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247170301 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247170301Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247170301Cleaning up temporary data from old regions at 1732247170325 (+24 ms)Region opened successfully at 1732247170339 (+14 ms) 2024-11-22T03:46:10,339 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:46:10,339 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:46:10,339 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:46:10,340 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:46:10,340 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:46:10,342 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:46:10,342 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 
1588230740: Waiting for close lock at 1732247170339Disabling compacts and flushes for region at 1732247170339Disabling writes for close at 1732247170340 (+1 ms)Writing region close event to WAL at 1732247170341 (+1 ms)Closed at 1732247170341 2024-11-22T03:46:10,345 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:46:10,345 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T03:46:10,355 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T03:46:10,362 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:46:10,365 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T03:46:10,400 INFO [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(2659): reportForDuty to master=c85114ed5096,40393,1732247168319 with port=37629, startcode=1732247169094 2024-11-22T03:46:10,403 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40393 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c85114ed5096,37629,1732247169094 2024-11-22T03:46:10,406 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40393 {}] master.ServerManager(517): Registering regionserver=c85114ed5096,37629,1732247169094 2024-11-22T03:46:10,415 DEBUG [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984 2024-11-22T03:46:10,416 DEBUG [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37663 2024-11-22T03:46:10,416 DEBUG [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:46:10,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:46:10,423 DEBUG [RS:0;c85114ed5096:37629 {}] zookeeper.ZKUtil(111): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c85114ed5096,37629,1732247169094 2024-11-22T03:46:10,423 WARN [RS:0;c85114ed5096:37629 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
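
The FlushLargeStoresPolicy entries above check out with a little arithmetic: for master:store the flush size is logged as 134217728 bytes and the table has four families (info, proc, rs, state), giving the 32 MB (33554432) per-family lower bound; for hbase:meta the logged 16 MB (16777216) lower bound times its four families (info, ns, rep_barrier, table) implies a 64 MB region flush size. A tiny sketch of that division:

    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            // master:store, values taken from the log above.
            long masterStoreFlushSize = 134_217_728L;      // 128 MB
            System.out.println(masterStoreFlushSize / 4);  // 33554432, i.e. 32 MB per family

            // hbase:meta, working backwards from flushSizeLowerBound=16777216.
            long metaLowerBound = 16_777_216L;             // 16 MB
            System.out.println(metaLowerBound * 4);        // 67108864, i.e. a 64 MB flush size
        }
    }
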
2024-11-22T03:46:10,423 INFO [RS:0;c85114ed5096:37629 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:46:10,424 DEBUG [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094 2024-11-22T03:46:10,427 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c85114ed5096,37629,1732247169094] 2024-11-22T03:46:10,452 INFO [RS:0;c85114ed5096:37629 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:46:10,472 INFO [RS:0;c85114ed5096:37629 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:46:10,476 INFO [RS:0;c85114ed5096:37629 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:46:10,477 INFO [RS:0;c85114ed5096:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:10,478 INFO [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:46:10,484 INFO [RS:0;c85114ed5096:37629 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:46:10,487 INFO [RS:0;c85114ed5096:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:10,487 DEBUG [RS:0;c85114ed5096:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:46:10,488 DEBUG [RS:0;c85114ed5096:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:46:10,488 DEBUG [RS:0;c85114ed5096:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:46:10,493 DEBUG [RS:0;c85114ed5096:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:46:10,493 DEBUG [RS:0;c85114ed5096:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:46:10,494 DEBUG [RS:0;c85114ed5096:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c85114ed5096:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:46:10,494 DEBUG [RS:0;c85114ed5096:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:46:10,494 DEBUG [RS:0;c85114ed5096:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:46:10,494 DEBUG [RS:0;c85114ed5096:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c85114ed5096:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T03:46:10,494 DEBUG [RS:0;c85114ed5096:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:46:10,494 DEBUG [RS:0;c85114ed5096:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:46:10,495 DEBUG [RS:0;c85114ed5096:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:46:10,496 DEBUG [RS:0;c85114ed5096:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:46:10,496 DEBUG [RS:0;c85114ed5096:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:46:10,500 INFO [RS:0;c85114ed5096:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:10,500 INFO [RS:0;c85114ed5096:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:10,501 INFO [RS:0;c85114ed5096:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:10,501 INFO [RS:0;c85114ed5096:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:10,501 INFO [RS:0;c85114ed5096:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:10,501 INFO [RS:0;c85114ed5096:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,37629,1732247169094-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:46:10,516 WARN [c85114ed5096:40393 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T03:46:10,533 INFO [RS:0;c85114ed5096:37629 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:46:10,536 INFO [RS:0;c85114ed5096:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,37629,1732247169094-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:10,537 INFO [RS:0;c85114ed5096:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:10,537 INFO [RS:0;c85114ed5096:37629 {}] regionserver.Replication(171): c85114ed5096,37629,1732247169094 started 2024-11-22T03:46:10,565 INFO [RS:0;c85114ed5096:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
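
The MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. The low mark is exactly 95% of the limit, matching the usual lower-limit default of 0.95; if the default 0.4 heap fraction is also in effect (an assumption, since the heap size is not printed in this log), the limit corresponds to roughly a 2.2 GB heap. A quick check of that arithmetic:

    public class MemStoreLimitSketch {
        public static void main(String[] args) {
            double limitMb = 880.0;                    // from the log above
            double lowMarkMb = 836.0;                  // from the log above
            System.out.println(lowMarkMb / limitMb);   // 0.95
            // Assuming the default 0.4 global memstore fraction (not confirmed by the log):
            System.out.println(limitMb / 0.4);         // ~2200 MB of heap
        }
    }
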
2024-11-22T03:46:10,565 INFO [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(1482): Serving as c85114ed5096,37629,1732247169094, RpcServer on c85114ed5096/172.17.0.2:37629, sessionid=0x10065897e270001 2024-11-22T03:46:10,566 DEBUG [RS:0;c85114ed5096:37629 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:46:10,567 DEBUG [RS:0;c85114ed5096:37629 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c85114ed5096,37629,1732247169094 2024-11-22T03:46:10,567 DEBUG [RS:0;c85114ed5096:37629 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,37629,1732247169094' 2024-11-22T03:46:10,567 DEBUG [RS:0;c85114ed5096:37629 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:46:10,568 DEBUG [RS:0;c85114ed5096:37629 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:46:10,569 DEBUG [RS:0;c85114ed5096:37629 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:46:10,569 DEBUG [RS:0;c85114ed5096:37629 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:46:10,569 DEBUG [RS:0;c85114ed5096:37629 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c85114ed5096,37629,1732247169094 2024-11-22T03:46:10,569 DEBUG [RS:0;c85114ed5096:37629 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,37629,1732247169094' 2024-11-22T03:46:10,570 DEBUG [RS:0;c85114ed5096:37629 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:46:10,570 DEBUG [RS:0;c85114ed5096:37629 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:46:10,571 DEBUG [RS:0;c85114ed5096:37629 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:46:10,571 INFO [RS:0;c85114ed5096:37629 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:46:10,571 INFO [RS:0;c85114ed5096:37629 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
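Before opening its WAL, the region server above registers as a procedure member for flush-table-proc and online-snapshot by checking the /hbase/flush-table-proc and /hbase/online-snapshot znodes. A sketch that lists those znodes with a plain ZooKeeper client, reusing the quorum 127.0.0.1:61678 and the paths from the entries above (the session timeout and the no-op watcher are illustrative choices, and the paths only exist once the procedure coordinators have started):

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ProcedureZnodeSketch {
        public static void main(String[] args) throws Exception {
            // Quorum address taken from this log; a no-op watcher is enough for a one-off read.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:61678", 30000, event -> { });
            try {
                for (String path : new String[] {
                        "/hbase/flush-table-proc/acquired",
                        "/hbase/flush-table-proc/abort",
                        "/hbase/online-snapshot/acquired",
                        "/hbase/online-snapshot/abort" }) {
                    List<String> children = zk.getChildren(path, false);
                    System.out.println(path + " -> " + children);
                }
            } finally {
                zk.close();
            }
        }
    }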
2024-11-22T03:46:10,683 INFO [RS:0;c85114ed5096:37629 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C37629%2C1732247169094, suffix=, logDir=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094, archiveDir=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/oldWALs, maxLogs=32 2024-11-22T03:46:10,686 INFO [RS:0;c85114ed5096:37629 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C37629%2C1732247169094.1732247170686 2024-11-22T03:46:10,697 INFO [RS:0;c85114ed5096:37629 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247170686 2024-11-22T03:46:10,705 DEBUG [RS:0;c85114ed5096:37629 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43557:43557),(127.0.0.1/127.0.0.1:40077:40077)] 2024-11-22T03:46:10,769 DEBUG [c85114ed5096:40393 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T03:46:10,784 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c85114ed5096,37629,1732247169094 2024-11-22T03:46:10,791 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c85114ed5096,37629,1732247169094, state=OPENING 2024-11-22T03:46:10,795 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T03:46:10,797 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:46:10,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:46:10,798 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:46:10,798 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:46:10,800 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:46:10,803 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c85114ed5096,37629,1732247169094}] 2024-11-22T03:46:10,981 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T03:46:10,985 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47093, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T03:46:10,999 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T03:46:10,999 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:46:11,003 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C37629%2C1732247169094.meta, suffix=.meta, logDir=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094, archiveDir=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/oldWALs, maxLogs=32 2024-11-22T03:46:11,005 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C37629%2C1732247169094.meta.1732247171005.meta 2024-11-22T03:46:11,015 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.meta.1732247171005.meta 2024-11-22T03:46:11,018 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43557:43557),(127.0.0.1/127.0.0.1:40077:40077)] 2024-11-22T03:46:11,019 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:46:11,021 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T03:46:11,023 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T03:46:11,028 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
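The WAL configuration reported above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) ties together a handful of region server settings; the roll size appears to be the block size scaled by a roll multiplier (256 MB * 0.5 = 128 MB). A hedged sketch of those knobs, assuming the key names hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier and hbase.regionserver.maxlogs, which are worth verifying against the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed keys: size of a single WAL block and the fraction of it at which to roll.
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            conf.setInt("hbase.regionserver.maxlogs", 32);

            long blocksize = conf.getLong("hbase.regionserver.hlog.blocksize", 0L);
            float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            // 256 MB * 0.5 = 128 MB, matching the rollsize printed in the log above.
            System.out.println("rollsize=" + (long) (blocksize * multiplier) + " bytes");
        }
    }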
2024-11-22T03:46:11,032 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T03:46:11,033 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:46:11,033 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T03:46:11,033 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T03:46:11,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:46:11,041 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:46:11,041 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:46:11,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:46:11,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:46:11,045 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:46:11,045 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:46:11,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:46:11,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:46:11,049 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:46:11,049 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:46:11,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:46:11,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:46:11,053 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:46:11,053 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:46:11,054 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
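Every store opened above logs the same CompactionConfiguration: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2. A small sketch of the corresponding tuning knobs, assuming the key names hbase.hstore.compaction.min, hbase.hstore.compaction.max and hbase.hstore.compaction.ratio (assumed names, not printed in this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed keys; the values mirror the figures reported in the log above.
            conf.setInt("hbase.hstore.compaction.min", 3);     // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);    // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            System.out.println("minor compaction selects between "
                + conf.getInt("hbase.hstore.compaction.min", 3) + " and "
                + conf.getInt("hbase.hstore.compaction.max", 10) + " files, ratio "
                + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
        }
    }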
2024-11-22T03:46:11,054 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:46:11,057 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740 2024-11-22T03:46:11,060 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740 2024-11-22T03:46:11,063 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:46:11,063 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:46:11,066 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:46:11,070 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:46:11,073 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804151, jitterRate=0.02253146469593048}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:46:11,073 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T03:46:11,075 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732247171034Writing region info on filesystem at 1732247171034Initializing all the Stores at 1732247171037 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247171037Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247171038 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247171038Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247171038Cleaning up temporary data from old regions at 1732247171063 (+25 ms)Running coprocessor post-open hooks at 1732247171073 (+10 ms)Region opened successfully at 1732247171075 (+2 ms) 2024-11-22T03:46:11,084 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732247170972 2024-11-22T03:46:11,096 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T03:46:11,097 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T03:46:11,099 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c85114ed5096,37629,1732247169094 2024-11-22T03:46:11,103 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c85114ed5096,37629,1732247169094, state=OPEN 2024-11-22T03:46:11,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:46:11,107 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c85114ed5096,37629,1732247169094 2024-11-22T03:46:11,108 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:46:11,109 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:46:11,109 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:46:11,117 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T03:46:11,117 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c85114ed5096,37629,1732247169094 in 305 msec 2024-11-22T03:46:11,127 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T03:46:11,127 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 765 msec 2024-11-22T03:46:11,129 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:46:11,129 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T03:46:11,152 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:46:11,154 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c85114ed5096,37629,1732247169094, seqNum=-1] 2024-11-22T03:46:11,178 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:46:11,180 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37293, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:46:11,207 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0860 sec 2024-11-22T03:46:11,208 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732247171208, completionTime=-1 2024-11-22T03:46:11,211 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T03:46:11,211 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T03:46:11,245 INFO [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T03:46:11,245 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732247231245 2024-11-22T03:46:11,245 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732247291245 2024-11-22T03:46:11,245 INFO [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 33 msec 2024-11-22T03:46:11,248 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,40393,1732247168319-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:11,248 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,40393,1732247168319-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:11,248 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,40393,1732247168319-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:11,250 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c85114ed5096:40393, period=300000, unit=MILLISECONDS is enabled. 
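InitMetaProcedure above finishes by creating the 'default' and 'hbase' namespaces. A minimal client-side check, assuming a reachable cluster configuration on the classpath (connection details are not taken from this log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespacesSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // After master initialization this is expected to print 'default' and 'hbase'.
                for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                    System.out.println(ns.getName());
                }
            }
        }
    }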
2024-11-22T03:46:11,250 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:11,251 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T03:46:11,258 DEBUG [master/c85114ed5096:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T03:46:11,290 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.014sec 2024-11-22T03:46:11,292 INFO [master/c85114ed5096:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T03:46:11,293 INFO [master/c85114ed5096:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T03:46:11,295 INFO [master/c85114ed5096:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T03:46:11,295 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T03:46:11,296 INFO [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T03:46:11,297 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,40393,1732247168319-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:46:11,297 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,40393,1732247168319-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T03:46:11,311 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T03:46:11,312 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T03:46:11,313 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,40393,1732247168319-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
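With the master reporting that it has completed initialization, a client can confirm the cluster shape seen in this run (one active master, one region server) through the Admin API. A sketch, assuming the ClusterMetrics-based methods are available in the client version being used:

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                ClusterMetrics metrics = admin.getClusterMetrics();
                // For this minicluster run the expectation is one active master and one region server.
                System.out.println("active master: " + metrics.getMasterName());
                System.out.println("region servers: " + metrics.getLiveServerMetrics().size());
            }
        }
    }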
2024-11-22T03:46:11,336 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b92d831, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:46:11,339 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-22T03:46:11,339 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-22T03:46:11,344 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c85114ed5096,40393,-1 for getting cluster id 2024-11-22T03:46:11,347 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T03:46:11,363 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'bd1659ed-4749-44d5-9c29-9f9e27a12f70' 2024-11-22T03:46:11,366 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T03:46:11,366 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "bd1659ed-4749-44d5-9c29-9f9e27a12f70" 2024-11-22T03:46:11,368 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ff46444, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:46:11,368 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c85114ed5096,40393,-1] 2024-11-22T03:46:11,371 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T03:46:11,373 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:46:11,376 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47168, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T03:46:11,379 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f0ec0b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:46:11,380 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:46:11,391 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c85114ed5096,37629,1732247169094, seqNum=-1] 2024-11-22T03:46:11,392 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:46:11,400 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40096, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:46:11,425 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=c85114ed5096,40393,1732247168319 2024-11-22T03:46:11,426 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:46:11,436 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T03:46:11,441 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T03:46:11,447 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is c85114ed5096,40393,1732247168319 2024-11-22T03:46:11,451 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@92ba0a6 2024-11-22T03:46:11,453 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T03:46:11,456 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47182, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T03:46:11,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40393 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T03:46:11,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40393 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
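The two warnings above come from the table sanity checks: the test intentionally creates its table with MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192 so that splits and flushes trigger quickly. A sketch of a descriptor carrying those values plus the 'info' family schema echoed in the create request that follows; the builder calls are the standard client API, while whether a given version merely warns or rejects such values depends on its sanity-check settings (an assumption, not shown in this log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SmallTableSketch {
        public static void main(String[] args) throws Exception {
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
                .setMaxFileSize(786432L)       // triggers the MAX_FILESIZE warning above
                .setMemStoreFlushSize(8192L)   // triggers the MEMSTORE_FLUSHSIZE warning above
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(1)                 // VERSIONS => '1'
                    .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                    .setBlocksize(65536)               // BLOCKSIZE => '65536 B (64KB)'
                    .build())
                .build();
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.createTable(td); // blocks until the CreateTableProcedure finishes
            }
        }
    }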
2024-11-22T03:46:11,467 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40393 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:46:11,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40393 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-22T03:46:11,521 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T03:46:11,524 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40393 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-22T03:46:11,524 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:46:11,527 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T03:46:11,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40393 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:46:11,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741835_1011 (size=389) 2024-11-22T03:46:11,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741835_1011 (size=389) 2024-11-22T03:46:11,618 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => adcda6129787d6076f1761f187655562, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984 2024-11-22T03:46:11,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741836_1012 (size=72) 2024-11-22T03:46:11,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741836_1012 (size=72) 2024-11-22T03:46:11,637 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:46:11,637 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing adcda6129787d6076f1761f187655562, disabling compactions & flushes 2024-11-22T03:46:11,637 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. 2024-11-22T03:46:11,637 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. 2024-11-22T03:46:11,637 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. after waiting 0 ms 2024-11-22T03:46:11,637 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. 2024-11-22T03:46:11,637 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. 2024-11-22T03:46:11,637 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for adcda6129787d6076f1761f187655562: Waiting for close lock at 1732247171637Disabling compacts and flushes for region at 1732247171637Disabling writes for close at 1732247171637Writing region close event to WAL at 1732247171637Closed at 1732247171637 2024-11-22T03:46:11,640 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T03:46:11,644 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732247171640"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732247171640"}]},"ts":"1732247171640"} 2024-11-22T03:46:11,651 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
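"Added 1 regions to meta" above corresponds to the Put of the region's info:regioninfo and info:state cells into hbase:meta. A client-side sketch that reads those rows back with an ordinary scan; the row-key prefix and qualifier names are taken from the Put logged above, and the state cell is simply echoed as text:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaRowSketch {
        public static void main(String[] args) throws Exception {
            String prefix = "TestLogRolling-testSlowSyncLogRolling,";
            Scan scan = new Scan()
                .withStartRow(Bytes.toBytes(prefix))
                .addFamily(Bytes.toBytes("info"));
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table meta = conn.getTable(TableName.META_TABLE_NAME);
                 ResultScanner scanner = meta.getScanner(scan)) {
                for (Result r : scanner) {
                    String row = Bytes.toString(r.getRow());
                    if (!row.startsWith(prefix)) {
                        break; // past this table's region rows
                    }
                    byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
                    System.out.println(row + " state=" + (state == null ? "?" : Bytes.toString(state)));
                }
            }
        }
    }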
2024-11-22T03:46:11,653 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T03:46:11,655 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732247171653"}]},"ts":"1732247171653"} 2024-11-22T03:46:11,660 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-22T03:46:11,661 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=adcda6129787d6076f1761f187655562, ASSIGN}] 2024-11-22T03:46:11,664 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=adcda6129787d6076f1761f187655562, ASSIGN 2024-11-22T03:46:11,666 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=adcda6129787d6076f1761f187655562, ASSIGN; state=OFFLINE, location=c85114ed5096,37629,1732247169094; forceNewPlan=false, retain=false 2024-11-22T03:46:11,817 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=adcda6129787d6076f1761f187655562, regionState=OPENING, regionLocation=c85114ed5096,37629,1732247169094 2024-11-22T03:46:11,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=adcda6129787d6076f1761f187655562, ASSIGN because future has completed 2024-11-22T03:46:11,826 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure adcda6129787d6076f1761f187655562, server=c85114ed5096,37629,1732247169094}] 2024-11-22T03:46:11,989 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. 
2024-11-22T03:46:11,989 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => adcda6129787d6076f1761f187655562, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:46:11,990 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling adcda6129787d6076f1761f187655562 2024-11-22T03:46:11,990 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:46:11,990 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for adcda6129787d6076f1761f187655562 2024-11-22T03:46:11,990 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for adcda6129787d6076f1761f187655562 2024-11-22T03:46:11,996 INFO [StoreOpener-adcda6129787d6076f1761f187655562-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region adcda6129787d6076f1761f187655562 2024-11-22T03:46:12,000 INFO [StoreOpener-adcda6129787d6076f1761f187655562-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region adcda6129787d6076f1761f187655562 columnFamilyName info 2024-11-22T03:46:12,000 DEBUG [StoreOpener-adcda6129787d6076f1761f187655562-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:46:12,001 INFO [StoreOpener-adcda6129787d6076f1761f187655562-1 {}] regionserver.HStore(327): Store=adcda6129787d6076f1761f187655562/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:46:12,002 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for adcda6129787d6076f1761f187655562 2024-11-22T03:46:12,004 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562 2024-11-22T03:46:12,005 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562 2024-11-22T03:46:12,006 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for adcda6129787d6076f1761f187655562 2024-11-22T03:46:12,006 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for adcda6129787d6076f1761f187655562 2024-11-22T03:46:12,009 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for adcda6129787d6076f1761f187655562 2024-11-22T03:46:12,013 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:46:12,018 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened adcda6129787d6076f1761f187655562; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775347, jitterRate=-0.014096289873123169}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T03:46:12,018 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for adcda6129787d6076f1761f187655562 2024-11-22T03:46:12,021 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for adcda6129787d6076f1761f187655562: Running coprocessor pre-open hook at 1732247171991Writing region info on filesystem at 1732247171991Initializing all the Stores at 1732247171993 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247171993Cleaning up temporary data from old regions at 1732247172006 (+13 ms)Running coprocessor post-open hooks at 1732247172019 (+13 ms)Region opened successfully at 1732247172020 (+1 ms) 2024-11-22T03:46:12,023 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562., pid=6, masterSystemTime=1732247171982 2024-11-22T03:46:12,027 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. 2024-11-22T03:46:12,027 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. 2024-11-22T03:46:12,029 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=adcda6129787d6076f1761f187655562, regionState=OPEN, openSeqNum=2, regionLocation=c85114ed5096,37629,1732247169094 2024-11-22T03:46:12,033 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure adcda6129787d6076f1761f187655562, server=c85114ed5096,37629,1732247169094 because future has completed 2024-11-22T03:46:12,042 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T03:46:12,042 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure adcda6129787d6076f1761f187655562, server=c85114ed5096,37629,1732247169094 in 210 msec 2024-11-22T03:46:12,050 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T03:46:12,050 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=adcda6129787d6076f1761f187655562, ASSIGN in 381 msec 2024-11-22T03:46:12,053 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T03:46:12,053 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732247172053"}]},"ts":"1732247172053"} 2024-11-22T03:46:12,058 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-22T03:46:12,060 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T03:46:12,065 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 587 msec 2024-11-22T03:46:16,537 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T03:46:16,596 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T03:46:16,598 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-22T03:46:18,750 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T03:46:18,750 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T03:46:18,752 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-22T03:46:18,752 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-22T03:46:18,753 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:46:18,753 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T03:46:18,754 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T03:46:18,754 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-22T03:46:21,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40393 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:46:21,548 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-22T03:46:21,550 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-22T03:46:21,558 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-22T03:46:21,559 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. 
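Once the CREATE operation completes, the client locates the region that will receive 'row0001'; the AsyncNonMetaRegionLocator entry a little further below shows the cached answer. A synchronous equivalent using RegionLocator, as a sketch (cluster configuration assumed to be on the classpath):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateRegionSketch {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 RegionLocator locator = conn.getRegionLocator(table)) {
                HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row0001"));
                // For this single-region table the answer matches the firstRegionName logged above.
                System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
            }
        }
    }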
2024-11-22T03:46:21,560 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C37629%2C1732247169094.1732247181560 2024-11-22T03:46:21,568 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:46:21,568 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:46:21,569 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:46:21,569 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:46:21,569 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:46:21,569 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247170686 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247181560 2024-11-22T03:46:21,571 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43557:43557),(127.0.0.1/127.0.0.1:40077:40077)] 2024-11-22T03:46:21,571 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247170686 is not closed yet, will try archiving it next time 2024-11-22T03:46:21,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741833_1009 (size=451) 2024-11-22T03:46:21,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741833_1009 (size=451) 2024-11-22T03:46:21,574 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247170686 to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/oldWALs/c85114ed5096%2C37629%2C1732247169094.1732247170686 2024-11-22T03:46:21,580 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562., hostname=c85114ed5096,37629,1732247169094, seqNum=2] 2024-11-22T03:46:33,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37629 {}] regionserver.HRegion(8855): Flush requested on adcda6129787d6076f1761f187655562 2024-11-22T03:46:33,626 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing adcda6129787d6076f1761f187655562 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:46:33,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/a2f659c58e614b7690d5004de9729a7d is 1080, key is row0001/info:/1732247181584/Put/seqid=0 2024-11-22T03:46:33,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741838_1014 (size=12509) 2024-11-22T03:46:33,692 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741838_1014 (size=12509) 2024-11-22T03:46:33,693 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/a2f659c58e614b7690d5004de9729a7d 2024-11-22T03:46:33,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/a2f659c58e614b7690d5004de9729a7d as hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/a2f659c58e614b7690d5004de9729a7d 2024-11-22T03:46:33,749 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/a2f659c58e614b7690d5004de9729a7d, entries=7, sequenceid=11, filesize=12.2 K 2024-11-22T03:46:33,756 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for adcda6129787d6076f1761f187655562 in 129ms, sequenceid=11, compaction requested=false 2024-11-22T03:46:33,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for adcda6129787d6076f1761f187655562: 2024-11-22T03:46:37,335 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
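The flush above moves seven cells of roughly 1,080 bytes each (7,532 B / 7 entries ≈ 1,076 B per cell, consistent with the "biggest cell ... is 1080" line) from the memstore into a .tmp HFile and then commits it under info/. A flush like this can also be requested explicitly through the Admin API; a minimal sketch, with everything except the table name assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ForceFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks every region of the table to flush its memstore to an HFile,
      // the same operation MemStoreFlusher performs above (7 cells, ~7.36 KB).
      admin.flush(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"));
    }
  }
}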
2024-11-22T03:46:41,646 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C37629%2C1732247169094.1732247201646 2024-11-22T03:46:41,861 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 211 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK], DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK]] 2024-11-22T03:46:41,862 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:46:41,862 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:46:41,862 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:46:41,862 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:46:41,863 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:46:41,863 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247181560 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247201646 2024-11-22T03:46:41,864 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40077:40077),(127.0.0.1/127.0.0.1:43557:43557)] 2024-11-22T03:46:41,864 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247181560 is not closed yet, will try archiving it next time 2024-11-22T03:46:41,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741837_1013 (size=12399) 2024-11-22T03:46:41,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741837_1013 (size=12399) 2024-11-22T03:46:42,071 INFO [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:46:44,276 INFO [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:46:46,481 INFO [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:46:48,684 INFO [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:46:48,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37629 {}] regionserver.HRegion(8855): Flush requested on adcda6129787d6076f1761f187655562 2024-11-22T03:46:48,685 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing adcda6129787d6076f1761f187655562 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:46:48,887 INFO [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:46:48,896 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/d375f6fccbdd4263b3666654b767d167 is 1080, key is row0008/info:/1732247195625/Put/seqid=0 2024-11-22T03:46:48,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741840_1016 (size=12509) 2024-11-22T03:46:48,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741840_1016 (size=12509) 2024-11-22T03:46:48,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/d375f6fccbdd4263b3666654b767d167 2024-11-22T03:46:48,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/d375f6fccbdd4263b3666654b767d167 as hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/d375f6fccbdd4263b3666654b767d167 2024-11-22T03:46:48,945 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/d375f6fccbdd4263b3666654b767d167, entries=7, sequenceid=21, filesize=12.2 K 2024-11-22T03:46:49,149 INFO [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:46:49,149 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for adcda6129787d6076f1761f187655562 in 
464ms, sequenceid=21, compaction requested=false 2024-11-22T03:46:49,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for adcda6129787d6076f1761f187655562: 2024-11-22T03:46:49,149 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-22T03:46:49,150 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:46:49,150 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/a2f659c58e614b7690d5004de9729a7d because midkey is the same as first or last row 2024-11-22T03:46:50,890 INFO [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:46:51,315 INFO [master/c85114ed5096:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T03:46:51,315 INFO [master/c85114ed5096:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-22T03:46:53,095 INFO [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:46:53,098 WARN [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:46:53,099 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c85114ed5096%2C37629%2C1732247169094:(num 1732247201646) roll requested 2024-11-22T03:46:53,100 INFO [regionserver/c85114ed5096:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C37629%2C1732247169094.1732247213099 2024-11-22T03:46:53,314 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 212 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:46:53,315 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:46:53,315 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:46:53,315 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:46:53,315 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:46:53,316 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
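The roll above fires because eight consecutive syncs exceeded the slow-sync latency limit against a count threshold of five ("count=8, threshold=5"); the rolls further down use a second, per-sync time limit instead ("time=5009 ms, threshold=5000 ms"). A stripped-down sketch of that heuristic (illustrative only, not the actual AbstractFSWAL code; the 5-count and 5,000 ms values come from the warnings in this log, the slow-sync cutoff is a placeholder, and the reset bookkeeping is simplified):

// Illustrative only: a reduced model of the two slow-sync roll triggers seen in this log.
public class SlowSyncRollCheck {
  private static final long SLOW_SYNC_NANOS = 200_000_000L;      // placeholder cutoff; the syncs counted above took ~201-212 ms
  private static final long ROLL_ON_SYNC_NANOS = 5_000_000_000L; // a single sync over ~5 s requests a roll ("threshold=5000 ms")
  private static final int SLOW_SYNC_ROLL_COUNT = 5;             // "threshold=5" in the count-based warning
  private int slowSyncCount;

  /** Returns true when a WAL roll should be requested after this sync. */
  public boolean recordSync(long syncDurationNanos) {
    if (syncDurationNanos > ROLL_ON_SYNC_NANOS) {
      return true; // time-based trigger
    }
    if (syncDurationNanos > SLOW_SYNC_NANOS) {
      slowSyncCount++;
    } else {
      slowSyncCount = 0; // simplified reset; the real code's windowing is more involved
    }
    return slowSyncCount > SLOW_SYNC_ROLL_COUNT; // count-based trigger, e.g. count=8 > 5 above
  }
}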
2024-11-22T03:46:53,316 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247201646 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247213099 2024-11-22T03:46:53,317 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43557:43557),(127.0.0.1/127.0.0.1:40077:40077)] 2024-11-22T03:46:53,317 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247201646 is not closed yet, will try archiving it next time 2024-11-22T03:46:53,317 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247181560 to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/oldWALs/c85114ed5096%2C37629%2C1732247169094.1732247181560 2024-11-22T03:46:53,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741839_1015 (size=7739) 2024-11-22T03:46:53,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741839_1015 (size=7739) 2024-11-22T03:46:55,300 INFO [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK], DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK]] 2024-11-22T03:46:56,991 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region adcda6129787d6076f1761f187655562, had cached 0 bytes from a total of 25018 2024-11-22T03:46:57,508 INFO [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK], DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK]] 2024-11-22T03:46:59,716 INFO [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK], DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK]] 2024-11-22T03:47:01,924 INFO [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK], 
DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK]] 2024-11-22T03:47:03,927 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T03:47:03,928 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C37629%2C1732247169094.1732247223928 2024-11-22T03:47:07,335 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T03:47:08,941 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5009 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK], DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK]] 2024-11-22T03:47:08,945 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK], DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK]] 2024-11-22T03:47:08,945 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c85114ed5096%2C37629%2C1732247169094:(num 1732247223928) roll requested 2024-11-22T03:47:08,945 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:08,945 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:08,945 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:08,946 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:08,946 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:08,946 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247213099 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247223928 2024-11-22T03:47:08,947 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40077:40077),(127.0.0.1/127.0.0.1:43557:43557)] 2024-11-22T03:47:08,947 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247213099 is not closed yet, will try archiving it next time 2024-11-22T03:47:08,948 INFO [regionserver/c85114ed5096:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C37629%2C1732247169094.1732247228947 2024-11-22T03:47:08,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741841_1017 (size=4753) 2024-11-22T03:47:08,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741841_1017 (size=4753) 2024-11-22T03:47:13,953 INFO [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:47:13,953 WARN [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:47:13,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37629 {}] regionserver.HRegion(8855): Flush requested on adcda6129787d6076f1761f187655562 2024-11-22T03:47:13,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing adcda6129787d6076f1761f187655562 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:47:13,964 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5010 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:47:13,964 WARN [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5010 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:47:15,955 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T03:47:18,960 INFO [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:47:18,960 WARN [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK], DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK]] 2024-11-22T03:47:18,961 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:18,961 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:18,962 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:18,962 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:18,963 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:18,963 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247223928 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247228947 2024-11-22T03:47:18,965 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43557:43557),(127.0.0.1/127.0.0.1:40077:40077)] 2024-11-22T03:47:18,966 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247223928 is not closed yet, will try archiving it next time 2024-11-22T03:47:18,966 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c85114ed5096%2C37629%2C1732247169094:(num 1732247228947) roll requested 2024-11-22T03:47:18,967 INFO [regionserver/c85114ed5096:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C37629%2C1732247169094.1732247238966 2024-11-22T03:47:18,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741842_1018 (size=1569) 2024-11-22T03:47:18,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741842_1018 (size=1569) 2024-11-22T03:47:18,971 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/f8f1cab1a6584a20b3109db848c4d2e7 is 1080, key is row0015/info:/1732247210688/Put/seqid=0 2024-11-22T03:47:18,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741844_1020 (size=12509) 2024-11-22T03:47:18,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741844_1020 (size=12509) 2024-11-22T03:47:18,981 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/f8f1cab1a6584a20b3109db848c4d2e7 2024-11-22T03:47:18,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/f8f1cab1a6584a20b3109db848c4d2e7 as hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/f8f1cab1a6584a20b3109db848c4d2e7 2024-11-22T03:47:19,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/f8f1cab1a6584a20b3109db848c4d2e7, entries=7, sequenceid=31, filesize=12.2 K 2024-11-22T03:47:23,985 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5012 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK], DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK]] 2024-11-22T03:47:23,986 WARN [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5012 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK], DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK]] 2024-11-22T03:47:24,002 INFO [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK], DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK]] 2024-11-22T03:47:24,002 WARN [FSHLog-0-hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984-prefix:c85114ed5096,37629,1732247169094 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40065,DS-b75c4909-8b05-412c-8c5e-2602bd6d87c2,DISK], DatanodeInfoWithStorage[127.0.0.1:36195,DS-13f3a00d-09d0-4bbf-ba10-13cb0f59477a,DISK]] 2024-11-22T03:47:24,003 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for adcda6129787d6076f1761f187655562 in 10048ms, sequenceid=31, compaction requested=true 2024-11-22T03:47:24,003 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:24,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for adcda6129787d6076f1761f187655562: 2024-11-22T03:47:24,003 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:24,004 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-22T03:47:24,004 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:24,004 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:47:24,004 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:24,004 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/a2f659c58e614b7690d5004de9729a7d because midkey is the same as first or last row 2024-11-22T03:47:24,004 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:24,005 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247228947 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247238966 2024-11-22T03:47:24,007 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:40077:40077),(127.0.0.1/127.0.0.1:43557:43557)] 2024-11-22T03:47:24,007 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247228947 is not closed yet, will try archiving it next time 2024-11-22T03:47:24,007 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247201646 to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/oldWALs/c85114ed5096%2C37629%2C1732247169094.1732247201646 2024-11-22T03:47:24,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store adcda6129787d6076f1761f187655562:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:47:24,008 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c85114ed5096%2C37629%2C1732247169094:(num 1732247244008) roll requested 2024-11-22T03:47:24,009 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C37629%2C1732247169094.1732247244008 2024-11-22T03:47:24,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741843_1019 (size=438) 2024-11-22T03:47:24,012 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247213099 to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/oldWALs/c85114ed5096%2C37629%2C1732247169094.1732247213099 2024-11-22T03:47:24,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:47:24,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741843_1019 (size=438) 2024-11-22T03:47:24,012 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:47:24,014 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247223928 to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/oldWALs/c85114ed5096%2C37629%2C1732247169094.1732247223928 2024-11-22T03:47:24,016 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:47:24,016 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247228947 to 
hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/oldWALs/c85114ed5096%2C37629%2C1732247169094.1732247228947 2024-11-22T03:47:24,017 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.HStore(1541): adcda6129787d6076f1761f187655562/info is initiating minor compaction (all files) 2024-11-22T03:47:24,018 INFO [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of adcda6129787d6076f1761f187655562/info in TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. 2024-11-22T03:47:24,018 INFO [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/a2f659c58e614b7690d5004de9729a7d, hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/d375f6fccbdd4263b3666654b767d167, hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/f8f1cab1a6584a20b3109db848c4d2e7] into tmpdir=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp, totalSize=36.6 K 2024-11-22T03:47:24,019 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:24,019 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:24,019 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:24,019 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] compactions.Compactor(225): Compacting a2f659c58e614b7690d5004de9729a7d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732247181584 2024-11-22T03:47:24,020 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:24,020 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:24,020 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247238966 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247244008 2024-11-22T03:47:24,020 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] compactions.Compactor(225): Compacting d375f6fccbdd4263b3666654b767d167, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732247195625 2024-11-22T03:47:24,021 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43557:43557),(127.0.0.1/127.0.0.1:40077:40077)] 2024-11-22T03:47:24,021 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247238966 is not closed yet, will try archiving it next time 2024-11-22T03:47:24,021 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] compactions.Compactor(225): Compacting f8f1cab1a6584a20b3109db848c4d2e7, keycount=7, bloomtype=ROW, 
size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732247210688 2024-11-22T03:47:24,021 INFO [regionserver/c85114ed5096:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C37629%2C1732247169094.1732247244021 2024-11-22T03:47:24,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741845_1021 (size=93) 2024-11-22T03:47:24,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741845_1021 (size=93) 2024-11-22T03:47:24,023 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247238966 to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/oldWALs/c85114ed5096%2C37629%2C1732247169094.1732247238966 2024-11-22T03:47:24,034 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:24,034 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:24,034 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:24,034 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:24,035 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:24,035 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247244008 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/WALs/c85114ed5096,37629,1732247169094/c85114ed5096%2C37629%2C1732247169094.1732247244021 2024-11-22T03:47:24,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741846_1022 (size=1258) 2024-11-22T03:47:24,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741846_1022 (size=1258) 2024-11-22T03:47:24,045 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43557:43557),(127.0.0.1/127.0.0.1:40077:40077)] 2024-11-22T03:47:24,056 INFO [RS:0;c85114ed5096:37629-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): adcda6129787d6076f1761f187655562#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:47:24,057 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/0b0ae08bd7eb4650880a96d173ff8c6b is 1080, key is row0001/info:/1732247181584/Put/seqid=0 2024-11-22T03:47:24,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741848_1024 (size=27710) 2024-11-22T03:47:24,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741848_1024 (size=27710) 2024-11-22T03:47:24,076 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/0b0ae08bd7eb4650880a96d173ff8c6b as hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/0b0ae08bd7eb4650880a96d173ff8c6b 2024-11-22T03:47:24,096 INFO [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in adcda6129787d6076f1761f187655562/info of adcda6129787d6076f1761f187655562 into 0b0ae08bd7eb4650880a96d173ff8c6b(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:47:24,096 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for adcda6129787d6076f1761f187655562: 2024-11-22T03:47:24,098 INFO [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562., storeName=adcda6129787d6076f1761f187655562/info, priority=13, startTime=1732247244007; duration=0sec 2024-11-22T03:47:24,099 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-22T03:47:24,099 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:47:24,099 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/0b0ae08bd7eb4650880a96d173ff8c6b because midkey is the same as first or last row 2024-11-22T03:47:24,099 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-22T03:47:24,099 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:47:24,099 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/0b0ae08bd7eb4650880a96d173ff8c6b because midkey is the same as first or last row 2024-11-22T03:47:24,099 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-22T03:47:24,100 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:47:24,100 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/0b0ae08bd7eb4650880a96d173ff8c6b because midkey is the same as first or last row 2024-11-22T03:47:24,100 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:47:24,100 DEBUG [RS:0;c85114ed5096:37629-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: adcda6129787d6076f1761f187655562:info 2024-11-22T03:47:36,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37629 {}] regionserver.HRegion(8855): Flush requested on adcda6129787d6076f1761f187655562 2024-11-22T03:47:36,061 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing adcda6129787d6076f1761f187655562 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:47:36,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/4e589ed1ce84458f8ef4e9eee90d7c82 is 1080, key is row0022/info:/1732247244022/Put/seqid=0 2024-11-22T03:47:36,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741849_1025 (size=12509) 2024-11-22T03:47:36,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741849_1025 (size=12509) 2024-11-22T03:47:36,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/4e589ed1ce84458f8ef4e9eee90d7c82 2024-11-22T03:47:36,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/4e589ed1ce84458f8ef4e9eee90d7c82 as hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/4e589ed1ce84458f8ef4e9eee90d7c82 2024-11-22T03:47:36,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/4e589ed1ce84458f8ef4e9eee90d7c82, entries=7, sequenceid=42, filesize=12.2 K 2024-11-22T03:47:36,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for adcda6129787d6076f1761f187655562 in 37ms, sequenceid=42, compaction requested=false 2024-11-22T03:47:36,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for adcda6129787d6076f1761f187655562: 2024-11-22T03:47:36,097 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-22T03:47:36,097 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:47:36,097 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/0b0ae08bd7eb4650880a96d173ff8c6b because midkey is the same as first or last row 2024-11-22T03:47:37,335 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T03:47:41,992 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region adcda6129787d6076f1761f187655562, had cached 0 bytes from a total of 40219 2024-11-22T03:47:44,081 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T03:47:44,082 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
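Just before the shutdown above, the split-policy DEBUG lines trace the decision repeatedly: the store now totals 39.3 K against a 16.0 K check size, so the size test passes, but the split is vetoed because the chosen split point (the midkey of the largest HFile) equals the region's first or last row. A schematic of that decision; the method shape and names below are illustrative, not the real RegionSplitPolicy API:

// Schematic of the split check traced above; types and names are illustrative only.
import java.util.Arrays;

public class SplitDecision {
  public static boolean shouldSplit(long sumSizeBytes, long sizeToCheckBytes,
                                    byte[] midKey, byte[] firstRowKey, byte[] lastRowKey) {
    if (sumSizeBytes <= sizeToCheckBytes) {
      return false; // not big enough yet
    }
    // Size test passes (e.g. 39.3 K > 16.0 K above), but a degenerate split point blocks the split:
    // "cannot split ... because midkey is the same as first or last row".
    boolean degenerateSplitPoint =
        Arrays.equals(midKey, firstRowKey) || Arrays.equals(midKey, lastRowKey);
    return !degenerateSplitPoint;
  }
}

For scale, the minor compaction logged a little earlier selected three 12,509-byte HFiles (3 × 12,509 = 37,527 bytes, the "37527" in the ExploringCompactionPolicy line) and rewrote them into a single 27,710-byte file, the 27.1 K store size reported afterwards.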
2024-11-22T03:47:44,082 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:47:44,093 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:47:44,094 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:47:44,094 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
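The call stack above pins down the teardown path: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection before stopping the cluster. A minimal sketch of that JUnit 4 lifecycle, assuming a locally created utility instance (the field name and the setUp body are assumptions; only the teardown call appears in the stack trace):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycle {
  private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); // assumed field, not from the test source

  @Before
  public void setUp() throws Exception {
    TEST_UTIL.startMiniCluster(); // brings up ZooKeeper, HDFS and HBase in-process
  }

  @After
  public void tearDown() throws Exception {
    // Produces the "Shutting down minicluster" and "Connection has been closed" lines above.
    TEST_UTIL.shutdownMiniCluster();
  }
}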
2024-11-22T03:47:44,094 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T03:47:44,094 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1469019133, stopped=false 2024-11-22T03:47:44,094 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c85114ed5096,40393,1732247168319 2024-11-22T03:47:44,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:47:44,096 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:47:44,096 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:44,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:44,096 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:47:44,096 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T03:47:44,096 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:47:44,097 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:47:44,097 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:47:44,097 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:47:44,097 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c85114ed5096,37629,1732247169094' ***** 2024-11-22T03:47:44,097 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T03:47:44,097 INFO [RS:0;c85114ed5096:37629 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T03:47:44,098 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T03:47:44,098 INFO [RS:0;c85114ed5096:37629 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T03:47:44,098 INFO [RS:0;c85114ed5096:37629 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T03:47:44,098 INFO [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(3091): Received CLOSE for adcda6129787d6076f1761f187655562 2024-11-22T03:47:44,098 INFO [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(959): stopping server c85114ed5096,37629,1732247169094 2024-11-22T03:47:44,098 INFO [RS:0;c85114ed5096:37629 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:47:44,098 INFO [RS:0;c85114ed5096:37629 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c85114ed5096:37629. 
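The ZKWatcher events a few lines up show how the shutdown propagates: the master deletes the /hbase/running znode and every process watching it receives a NodeDeleted event. The pattern, reduced to the plain ZooKeeper client API (this is not HBase's ZKWatcher class; the connect string reuses the quorum=127.0.0.1:61678 printed above and the session timeout is a placeholder):

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatch {
  public static void main(String[] args) throws Exception {
    CountDownLatch clusterStopped = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61678", 30_000, event -> { }); // timeout is a placeholder
    Watcher watcher = (WatchedEvent event) -> {
      // Deletion of /hbase/running is the cluster-wide stop signal seen in the events above.
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        clusterStopped.countDown();
      }
    };
    zk.exists("/hbase/running", watcher); // one-shot watch; fires NodeDeleted when the znode goes away
    clusterStopped.await();
    zk.close();
  }
}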
2024-11-22T03:47:44,099 DEBUG [RS:0;c85114ed5096:37629 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:47:44,099 DEBUG [RS:0;c85114ed5096:37629 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:47:44,099 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing adcda6129787d6076f1761f187655562, disabling compactions & flushes 2024-11-22T03:47:44,099 INFO [RS:0;c85114ed5096:37629 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T03:47:44,099 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. 2024-11-22T03:47:44,099 INFO [RS:0;c85114ed5096:37629 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T03:47:44,099 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. 2024-11-22T03:47:44,099 INFO [RS:0;c85114ed5096:37629 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T03:47:44,099 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. after waiting 0 ms 2024-11-22T03:47:44,099 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. 
2024-11-22T03:47:44,099 INFO [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T03:47:44,099 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing adcda6129787d6076f1761f187655562 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-22T03:47:44,099 INFO [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T03:47:44,099 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:47:44,099 DEBUG [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, adcda6129787d6076f1761f187655562=TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562.} 2024-11-22T03:47:44,099 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:47:44,100 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:47:44,100 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:47:44,100 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:47:44,100 DEBUG [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, adcda6129787d6076f1761f187655562 2024-11-22T03:47:44,100 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-22T03:47:44,105 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/1b8a112f91704f0185cc0415e17dec84 is 1080, key is row0029/info:/1732247258064/Put/seqid=0 2024-11-22T03:47:44,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741850_1026 (size=8193) 2024-11-22T03:47:44,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741850_1026 (size=8193) 2024-11-22T03:47:44,113 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/1b8a112f91704f0185cc0415e17dec84 2024-11-22T03:47:44,124 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/.tmp/info/8f19d8aabd9341ec88a22ef48b0080bd is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562./info:regioninfo/1732247172028/Put/seqid=0 2024-11-22T03:47:44,125 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/.tmp/info/1b8a112f91704f0185cc0415e17dec84 as hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/1b8a112f91704f0185cc0415e17dec84 2024-11-22T03:47:44,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741851_1027 (size=7016) 2024-11-22T03:47:44,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741851_1027 (size=7016) 2024-11-22T03:47:44,134 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/.tmp/info/8f19d8aabd9341ec88a22ef48b0080bd 2024-11-22T03:47:44,135 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/1b8a112f91704f0185cc0415e17dec84, entries=3, sequenceid=48, filesize=8.0 K 2024-11-22T03:47:44,136 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for adcda6129787d6076f1761f187655562 in 37ms, sequenceid=48, compaction requested=true 2024-11-22T03:47:44,137 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/a2f659c58e614b7690d5004de9729a7d, hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/d375f6fccbdd4263b3666654b767d167, hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/f8f1cab1a6584a20b3109db848c4d2e7] to archive 2024-11-22T03:47:44,141 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
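The flush lines above follow a two-step pattern: the memstore snapshot is written to an HFile under the region's .tmp directory, and that file is then committed into the live store directory (the "Committing ... as ..." line), after which it is reported with its entry count and size. A simplified sketch of the commit step, assuming a plain FileSystem rename as the mechanism for illustration (not a statement about HRegionFileSystem internals):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class CommitFlushedFileSketch {
      // Moves a flushed HFile from <region>/.tmp/<family>/<file> to <region>/<family>/<file>,
      // the point at which readers can see the newly flushed data.
      static void commit(FileSystem fs, Path tmpFile, Path storeFile) throws IOException {
        if (!fs.rename(tmpFile, storeFile)) {
          throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
        }
      }
    }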
2024-11-22T03:47:44,145 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/a2f659c58e614b7690d5004de9729a7d to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/archive/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/a2f659c58e614b7690d5004de9729a7d 2024-11-22T03:47:44,147 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/d375f6fccbdd4263b3666654b767d167 to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/archive/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/d375f6fccbdd4263b3666654b767d167 2024-11-22T03:47:44,149 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/f8f1cab1a6584a20b3109db848c4d2e7 to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/archive/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/info/f8f1cab1a6584a20b3109db848c4d2e7 2024-11-22T03:47:44,160 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/.tmp/ns/54db887a1bd34f74ab21065e52c0cae2 is 43, key is default/ns:d/1732247171184/Put/seqid=0 2024-11-22T03:47:44,160 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c85114ed5096:40393 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-22T03:47:44,162 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a2f659c58e614b7690d5004de9729a7d=12509, d375f6fccbdd4263b3666654b767d167=12509, f8f1cab1a6584a20b3109db848c4d2e7=12509] 2024-11-22T03:47:44,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741852_1028 (size=5153) 2024-11-22T03:47:44,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741852_1028 (size=5153) 2024-11-22T03:47:44,167 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/.tmp/ns/54db887a1bd34f74ab21065e52c0cae2 2024-11-22T03:47:44,168 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/default/TestLogRolling-testSlowSyncLogRolling/adcda6129787d6076f1761f187655562/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-22T03:47:44,170 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. 2024-11-22T03:47:44,171 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for adcda6129787d6076f1761f187655562: Waiting for close lock at 1732247264098Running coprocessor pre-close hooks at 1732247264099 (+1 ms)Disabling compacts and flushes for region at 1732247264099Disabling writes for close at 1732247264099Obtaining lock to block concurrent updates at 1732247264099Preparing flush snapshotting stores in adcda6129787d6076f1761f187655562 at 1732247264099Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732247264100 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. at 1732247264100Flushing adcda6129787d6076f1761f187655562/info: creating writer at 1732247264101 (+1 ms)Flushing adcda6129787d6076f1761f187655562/info: appending metadata at 1732247264105 (+4 ms)Flushing adcda6129787d6076f1761f187655562/info: closing flushed file at 1732247264105Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c152a2e: reopening flushed file at 1732247264123 (+18 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for adcda6129787d6076f1761f187655562 in 37ms, sequenceid=48, compaction requested=true at 1732247264136 (+13 ms)Writing region close event to WAL at 1732247264163 (+27 ms)Running coprocessor post-close hooks at 1732247264169 (+6 ms)Closed at 1732247264170 (+1 ms) 2024-11-22T03:47:44,171 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732247171459.adcda6129787d6076f1761f187655562. 
2024-11-22T03:47:44,190 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/.tmp/table/0e64410702ca430d866f267f69349604 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732247172053/Put/seqid=0 2024-11-22T03:47:44,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741853_1029 (size=5396) 2024-11-22T03:47:44,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741853_1029 (size=5396) 2024-11-22T03:47:44,196 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/.tmp/table/0e64410702ca430d866f267f69349604 2024-11-22T03:47:44,204 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/.tmp/info/8f19d8aabd9341ec88a22ef48b0080bd as hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/info/8f19d8aabd9341ec88a22ef48b0080bd 2024-11-22T03:47:44,215 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/info/8f19d8aabd9341ec88a22ef48b0080bd, entries=10, sequenceid=11, filesize=6.9 K 2024-11-22T03:47:44,216 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/.tmp/ns/54db887a1bd34f74ab21065e52c0cae2 as hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/ns/54db887a1bd34f74ab21065e52c0cae2 2024-11-22T03:47:44,225 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/ns/54db887a1bd34f74ab21065e52c0cae2, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T03:47:44,226 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/.tmp/table/0e64410702ca430d866f267f69349604 as hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/table/0e64410702ca430d866f267f69349604 2024-11-22T03:47:44,235 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/table/0e64410702ca430d866f267f69349604, entries=2, sequenceid=11, filesize=5.3 K 2024-11-22T03:47:44,236 INFO 
[RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false 2024-11-22T03:47:44,242 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T03:47:44,243 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:47:44,243 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:47:44,243 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732247264099Running coprocessor pre-close hooks at 1732247264099Disabling compacts and flushes for region at 1732247264099Disabling writes for close at 1732247264100 (+1 ms)Obtaining lock to block concurrent updates at 1732247264100Preparing flush snapshotting stores in 1588230740 at 1732247264100Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732247264100Flushing stores of hbase:meta,,1.1588230740 at 1732247264101 (+1 ms)Flushing 1588230740/info: creating writer at 1732247264101Flushing 1588230740/info: appending metadata at 1732247264123 (+22 ms)Flushing 1588230740/info: closing flushed file at 1732247264123Flushing 1588230740/ns: creating writer at 1732247264143 (+20 ms)Flushing 1588230740/ns: appending metadata at 1732247264159 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732247264159Flushing 1588230740/table: creating writer at 1732247264175 (+16 ms)Flushing 1588230740/table: appending metadata at 1732247264189 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732247264189Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d09c2a6: reopening flushed file at 1732247264203 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@752c1d5a: reopening flushed file at 1732247264215 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6cb83afb: reopening flushed file at 1732247264225 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false at 1732247264236 (+11 ms)Writing region close event to WAL at 1732247264238 (+2 ms)Running coprocessor post-close hooks at 1732247264243 (+5 ms)Closed at 1732247264243 2024-11-22T03:47:44,243 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T03:47:44,300 INFO [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(976): stopping server c85114ed5096,37629,1732247169094; all regions closed. 
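As a worked reading of the close journal above: the step timestamps are epoch milliseconds that line up with the log's own HH:mm:ss,SSS timestamps (…264099 corresponds to the 03:47:44,099 entries), and each "(+N ms)" is the delta from the preceding step. The close of hbase:meta therefore spans 1732247264243 - 1732247264099 = 144 ms end to end, of which the reported flush accounts for 136 ms.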
2024-11-22T03:47:44,303 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:44,303 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:44,303 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:44,303 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:44,304 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:44,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741834_1010 (size=3066) 2024-11-22T03:47:44,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741834_1010 (size=3066) 2024-11-22T03:47:44,315 DEBUG [RS:0;c85114ed5096:37629 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/oldWALs 2024-11-22T03:47:44,315 INFO [RS:0;c85114ed5096:37629 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c85114ed5096%2C37629%2C1732247169094.meta:.meta(num 1732247171005) 2024-11-22T03:47:44,316 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:44,316 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:44,316 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:44,316 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:44,316 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:44,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741847_1023 (size=12695) 2024-11-22T03:47:44,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741847_1023 (size=12695) 2024-11-22T03:47:44,323 DEBUG [RS:0;c85114ed5096:37629 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/oldWALs 2024-11-22T03:47:44,323 INFO [RS:0;c85114ed5096:37629 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c85114ed5096%2C37629%2C1732247169094:(num 1732247244021) 2024-11-22T03:47:44,323 DEBUG [RS:0;c85114ed5096:37629 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:47:44,323 INFO [RS:0;c85114ed5096:37629 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:47:44,323 INFO [RS:0;c85114ed5096:37629 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:47:44,323 INFO [RS:0;c85114ed5096:37629 {}] hbase.ChoreService(370): Chore service for: regionserver/c85114ed5096:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T03:47:44,324 INFO [RS:0;c85114ed5096:37629 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:47:44,324 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T03:47:44,324 INFO [RS:0;c85114ed5096:37629 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37629 2024-11-22T03:47:44,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:47:44,327 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c85114ed5096,37629,1732247169094 2024-11-22T03:47:44,327 INFO [RS:0;c85114ed5096:37629 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:47:44,329 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c85114ed5096,37629,1732247169094] 2024-11-22T03:47:44,330 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c85114ed5096,37629,1732247169094 already deleted, retry=false 2024-11-22T03:47:44,330 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c85114ed5096,37629,1732247169094 expired; onlineServers=0 2024-11-22T03:47:44,330 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c85114ed5096,40393,1732247168319' ***** 2024-11-22T03:47:44,330 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T03:47:44,330 INFO [M:0;c85114ed5096:40393 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:47:44,330 INFO [M:0;c85114ed5096:40393 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:47:44,330 DEBUG [M:0;c85114ed5096:40393 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T03:47:44,331 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
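The ZKWatcher lines here and earlier in the shutdown are ordinary one-shot ZooKeeper watch notifications (NodeDeleted for /hbase/rs/... and /hbase/running, NodeChildrenChanged for /hbase). A minimal sketch using the plain ZooKeeper client API rather than HBase's ZKWatcher, pointed at the same quorum seen in the log; the watched path and timeout are illustrative:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class ZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61678", 30000,
            (WatchedEvent event) ->
                System.out.println("type=" + event.getType() + " path=" + event.getPath()));
        // exists() with watch=true registers a one-shot watch that fires as
        // NodeCreated/NodeDeleted for this path, much like the events logged above.
        zk.exists("/hbase/running", true);
        Thread.sleep(5_000);   // keep the session open briefly so an event can arrive
        zk.close();
      }
    }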
2024-11-22T03:47:44,331 DEBUG [M:0;c85114ed5096:40393 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T03:47:44,331 DEBUG [master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247170263 {}] cleaner.HFileCleaner(306): Exit Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247170263,5,FailOnTimeoutGroup] 2024-11-22T03:47:44,331 DEBUG [master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247170266 {}] cleaner.HFileCleaner(306): Exit Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247170266,5,FailOnTimeoutGroup] 2024-11-22T03:47:44,331 INFO [M:0;c85114ed5096:40393 {}] hbase.ChoreService(370): Chore service for: master/c85114ed5096:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T03:47:44,331 INFO [M:0;c85114ed5096:40393 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:47:44,331 DEBUG [M:0;c85114ed5096:40393 {}] master.HMaster(1795): Stopping service threads 2024-11-22T03:47:44,331 INFO [M:0;c85114ed5096:40393 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T03:47:44,331 INFO [M:0;c85114ed5096:40393 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:47:44,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T03:47:44,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:44,332 INFO [M:0;c85114ed5096:40393 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T03:47:44,332 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-22T03:47:44,332 DEBUG [M:0;c85114ed5096:40393 {}] zookeeper.ZKUtil(347): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T03:47:44,333 WARN [M:0;c85114ed5096:40393 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T03:47:44,333 INFO [M:0;c85114ed5096:40393 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/.lastflushedseqids 2024-11-22T03:47:44,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741854_1030 (size=130) 2024-11-22T03:47:44,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741854_1030 (size=130) 2024-11-22T03:47:44,346 INFO [M:0;c85114ed5096:40393 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T03:47:44,347 INFO [M:0;c85114ed5096:40393 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T03:47:44,347 DEBUG [M:0;c85114ed5096:40393 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:47:44,347 INFO [M:0;c85114ed5096:40393 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:47:44,347 DEBUG [M:0;c85114ed5096:40393 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:47:44,347 DEBUG [M:0;c85114ed5096:40393 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:47:44,347 DEBUG [M:0;c85114ed5096:40393 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:47:44,348 INFO [M:0;c85114ed5096:40393 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-22T03:47:44,366 DEBUG [M:0;c85114ed5096:40393 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7199103ca3c54a938b6bc9d3175c39b8 is 82, key is hbase:meta,,1/info:regioninfo/1732247171099/Put/seqid=0 2024-11-22T03:47:44,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741855_1031 (size=5672) 2024-11-22T03:47:44,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741855_1031 (size=5672) 2024-11-22T03:47:44,429 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:47:44,429 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x10065897e270001, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:47:44,429 INFO [RS:0;c85114ed5096:37629 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:47:44,430 INFO [RS:0;c85114ed5096:37629 {}] regionserver.HRegionServer(1031): Exiting; stopping=c85114ed5096,37629,1732247169094; zookeeper connection closed. 2024-11-22T03:47:44,430 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@f5be0cd {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@f5be0cd 2024-11-22T03:47:44,431 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T03:47:44,509 INFO [regionserver/c85114ed5096:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:47:44,774 INFO [M:0;c85114ed5096:40393 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7199103ca3c54a938b6bc9d3175c39b8 2024-11-22T03:47:44,809 DEBUG [M:0;c85114ed5096:40393 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a377bcfe29c44fe6a915534a2bb80a2e is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732247172063/Put/seqid=0 2024-11-22T03:47:44,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741856_1032 (size=6247) 2024-11-22T03:47:44,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741856_1032 (size=6247) 2024-11-22T03:47:44,815 INFO [M:0;c85114ed5096:40393 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), 
to=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a377bcfe29c44fe6a915534a2bb80a2e 2024-11-22T03:47:44,822 INFO [M:0;c85114ed5096:40393 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a377bcfe29c44fe6a915534a2bb80a2e 2024-11-22T03:47:44,839 DEBUG [M:0;c85114ed5096:40393 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8b27916423b74011b52b85f627afc329 is 69, key is c85114ed5096,37629,1732247169094/rs:state/1732247170409/Put/seqid=0 2024-11-22T03:47:44,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741857_1033 (size=5156) 2024-11-22T03:47:44,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741857_1033 (size=5156) 2024-11-22T03:47:44,846 INFO [M:0;c85114ed5096:40393 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8b27916423b74011b52b85f627afc329 2024-11-22T03:47:44,868 DEBUG [M:0;c85114ed5096:40393 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f54e3980adb4492fade6252afcbf579e is 52, key is load_balancer_on/state:d/1732247171432/Put/seqid=0 2024-11-22T03:47:44,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741858_1034 (size=5056) 2024-11-22T03:47:44,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741858_1034 (size=5056) 2024-11-22T03:47:44,875 INFO [M:0;c85114ed5096:40393 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f54e3980adb4492fade6252afcbf579e 2024-11-22T03:47:44,884 DEBUG [M:0;c85114ed5096:40393 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7199103ca3c54a938b6bc9d3175c39b8 as hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7199103ca3c54a938b6bc9d3175c39b8 2024-11-22T03:47:44,891 INFO [M:0;c85114ed5096:40393 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7199103ca3c54a938b6bc9d3175c39b8, entries=8, sequenceid=59, filesize=5.5 K 2024-11-22T03:47:44,892 DEBUG [M:0;c85114ed5096:40393 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a377bcfe29c44fe6a915534a2bb80a2e as hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a377bcfe29c44fe6a915534a2bb80a2e 2024-11-22T03:47:44,899 INFO [M:0;c85114ed5096:40393 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a377bcfe29c44fe6a915534a2bb80a2e 2024-11-22T03:47:44,899 INFO [M:0;c85114ed5096:40393 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a377bcfe29c44fe6a915534a2bb80a2e, entries=6, sequenceid=59, filesize=6.1 K 2024-11-22T03:47:44,900 DEBUG [M:0;c85114ed5096:40393 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8b27916423b74011b52b85f627afc329 as hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8b27916423b74011b52b85f627afc329 2024-11-22T03:47:44,907 INFO [M:0;c85114ed5096:40393 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8b27916423b74011b52b85f627afc329, entries=1, sequenceid=59, filesize=5.0 K 2024-11-22T03:47:44,908 DEBUG [M:0;c85114ed5096:40393 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f54e3980adb4492fade6252afcbf579e as hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f54e3980adb4492fade6252afcbf579e 2024-11-22T03:47:44,915 INFO [M:0;c85114ed5096:40393 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f54e3980adb4492fade6252afcbf579e, entries=1, sequenceid=59, filesize=4.9 K 2024-11-22T03:47:44,917 INFO [M:0;c85114ed5096:40393 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 570ms, sequenceid=59, compaction requested=false 2024-11-22T03:47:44,918 INFO [M:0;c85114ed5096:40393 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:47:44,919 DEBUG [M:0;c85114ed5096:40393 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732247264347Disabling compacts and flushes for region at 1732247264347Disabling writes for close at 1732247264347Obtaining lock to block concurrent updates at 1732247264348 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732247264348Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732247264348Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732247264349 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732247264350 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732247264366 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732247264366Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732247264788 (+422 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732247264808 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732247264808Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732247264822 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732247264838 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732247264838Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732247264853 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732247264868 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732247264868Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c6177b4: reopening flushed file at 1732247264882 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34cfc17f: reopening flushed file at 1732247264891 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11b1e1bf: reopening flushed file at 1732247264899 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64bcbc2e: reopening flushed file at 1732247264907 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 570ms, sequenceid=59, compaction requested=false at 1732247264917 (+10 ms)Writing region close event to WAL at 1732247264918 (+1 ms)Closed at 1732247264918 2024-11-22T03:47:44,920 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:44,920 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:44,920 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:44,920 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:44,920 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:44,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36195 is added to blk_1073741830_1006 (size=27973) 2024-11-22T03:47:44,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40065 is added to blk_1073741830_1006 (size=27973) 2024-11-22T03:47:44,923 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
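The repeated "sync.N ... interrupted" messages (here and at the region server WAL close above) come from WAL sync threads that sit blocked on a queue and are woken by interrupt during shutdown; the thread report near the end of this log shows the corresponding FSHLog$SyncRunner frames parked in LinkedBlockingQueue.take. A small JDK-only illustration of that park-and-interrupt mechanic (not HBase code):

    import java.util.concurrent.LinkedBlockingQueue;

    public class TakeParkDemo {
      public static void main(String[] args) throws Exception {
        LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
        Thread consumer = new Thread(() -> {
          try {
            queue.take();                       // parks here while the queue stays empty
          } catch (InterruptedException e) {
            System.out.println("interrupted");  // analogous to the SyncRunner log message
          }
        }, "sync-demo");
        consumer.start();
        Thread.sleep(200);                      // let the consumer reach take() and park
        consumer.interrupt();                   // shutdown-style wake-up
        consumer.join();
      }
    }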
2024-11-22T03:47:44,923 INFO [M:0;c85114ed5096:40393 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T03:47:44,924 INFO [M:0;c85114ed5096:40393 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40393 2024-11-22T03:47:44,924 INFO [M:0;c85114ed5096:40393 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:47:45,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:47:45,025 INFO [M:0;c85114ed5096:40393 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:47:45,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40393-0x10065897e270000, quorum=127.0.0.1:61678, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:47:45,036 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@590b36b7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:47:45,041 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@237fc06a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:47:45,041 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:47:45,041 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35f1cf70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:47:45,042 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43794ae7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/hadoop.log.dir/,STOPPED} 2024-11-22T03:47:45,045 WARN [BP-1632664268-172.17.0.2-1732247165158 heartbeating to localhost/127.0.0.1:37663 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:47:45,045 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:47:45,045 WARN [BP-1632664268-172.17.0.2-1732247165158 heartbeating to localhost/127.0.0.1:37663 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1632664268-172.17.0.2-1732247165158 (Datanode Uuid ad4e545b-d304-428c-8a47-13f9db974967) service to localhost/127.0.0.1:37663 2024-11-22T03:47:45,045 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:47:45,046 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/cluster_8a641521-ea10-f02c-ee52-80b3b3d82476/data/data3/current/BP-1632664268-172.17.0.2-1732247165158 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:47:45,046 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/cluster_8a641521-ea10-f02c-ee52-80b3b3d82476/data/data4/current/BP-1632664268-172.17.0.2-1732247165158 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:47:45,047 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:47:45,053 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@11e88411{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:47:45,054 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75423500{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:47:45,054 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:47:45,054 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@621a7cbc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:47:45,054 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26c88bf4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/hadoop.log.dir/,STOPPED} 2024-11-22T03:47:45,055 WARN [BP-1632664268-172.17.0.2-1732247165158 heartbeating to localhost/127.0.0.1:37663 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:47:45,055 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:47:45,055 WARN [BP-1632664268-172.17.0.2-1732247165158 heartbeating to localhost/127.0.0.1:37663 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1632664268-172.17.0.2-1732247165158 (Datanode Uuid 2d108a64-ec22-4881-9c2a-719e630f3460) service to localhost/127.0.0.1:37663 2024-11-22T03:47:45,055 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:47:45,056 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/cluster_8a641521-ea10-f02c-ee52-80b3b3d82476/data/data1/current/BP-1632664268-172.17.0.2-1732247165158 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:47:45,056 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/cluster_8a641521-ea10-f02c-ee52-80b3b3d82476/data/data2/current/BP-1632664268-172.17.0.2-1732247165158 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:47:45,057 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:47:45,068 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6de997b9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:47:45,069 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:47:45,069 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:47:45,069 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:47:45,069 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/hadoop.log.dir/,STOPPED} 2024-11-22T03:47:45,081 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T03:47:45,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T03:47:45,119 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:37663 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:37663 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@7c71d5d8 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:37663 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37663 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37663 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:37663 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: master/c85114ed5096:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/c85114ed5096:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37663 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/c85114ed5096:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37663 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=146 (was 263), ProcessCount=11 (was 11), AvailableMemoryMB=3783 (was 4858)
2024-11-22T03:47:45,126 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=146, ProcessCount=11, AvailableMemoryMB=3782
2024-11-22T03:47:45,126 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-22T03:47:45,126 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/hadoop.log.dir so I do NOT create it in target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d
2024-11-22T03:47:45,126 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f0ccf0ba-6549-8c69-386b-09dddfedbd57/hadoop.tmp.dir so I do NOT create it in target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d
2024-11-22T03:47:45,126 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/cluster_3300ce87-5c35-b25a-98c3-4dc3d5c9cb4f, deleteOnExit=true
2024-11-22T03:47:45,126 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-22T03:47:45,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/test.cache.data in system properties and HBase conf
2024-11-22T03:47:45,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/hadoop.tmp.dir in system properties and HBase conf
2024-11-22T03:47:45,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/hadoop.log.dir in system properties and HBase conf
2024-11-22T03:47:45,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-22T03:47:45,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-22T03:47:45,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-22T03:47:45,127 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-22T03:47:45,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-22T03:47:45,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-22T03:47:45,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-22T03:47:45,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-22T03:47:45,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-22T03:47:45,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-22T03:47:45,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-22T03:47:45,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-22T03:47:45,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-22T03:47:45,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/nfs.dump.dir in system properties and HBase conf
2024-11-22T03:47:45,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/java.io.tmpdir in system properties and HBase conf
2024-11-22T03:47:45,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-22T03:47:45,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-22T03:47:45,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-22T03:47:45,142 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-22T03:47:45,194 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-22T03:47:45,200 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-22T03:47:45,202 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-22T03:47:45,202 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-22T03:47:45,202 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-22T03:47:45,203 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-22T03:47:45,203 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6bfb1630{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/hadoop.log.dir/,AVAILABLE}
2024-11-22T03:47:45,203 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@255aa000{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-22T03:47:45,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5f7f1f5d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/java.io.tmpdir/jetty-localhost-43805-hadoop-hdfs-3_4_1-tests_jar-_-any-16829144235469915419/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-22T03:47:45,302 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58d6cebc{HTTP/1.1, (http/1.1)}{localhost:43805}
2024-11-22T03:47:45,302 INFO [Time-limited test {}] server.Server(415): Started @102629ms
2024-11-22T03:47:45,315 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-22T03:47:45,366 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-22T03:47:45,370 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-22T03:47:45,370 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-22T03:47:45,370 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-22T03:47:45,370 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-22T03:47:45,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62ef2349{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/hadoop.log.dir/,AVAILABLE}
2024-11-22T03:47:45,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4504b856{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-22T03:47:45,467 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5686c999{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/java.io.tmpdir/jetty-localhost-44581-hadoop-hdfs-3_4_1-tests_jar-_-any-9042256316920509714/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-22T03:47:45,467 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@66e738a{HTTP/1.1, (http/1.1)}{localhost:44581}
2024-11-22T03:47:45,467 INFO [Time-limited test {}] server.Server(415): Started @102794ms
2024-11-22T03:47:45,469 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-22T03:47:45,506 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-22T03:47:45,510 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-22T03:47:45,511 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-22T03:47:45,511 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-22T03:47:45,511 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-22T03:47:45,511 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11e4312d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/hadoop.log.dir/,AVAILABLE}
2024-11-22T03:47:45,512 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bfe00af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-22T03:47:45,537 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/cluster_3300ce87-5c35-b25a-98c3-4dc3d5c9cb4f/data/data1/current/BP-203047728-172.17.0.2-1732247265153/current, will proceed with Du for space computation calculation,
2024-11-22T03:47:45,537 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/cluster_3300ce87-5c35-b25a-98c3-4dc3d5c9cb4f/data/data2/current/BP-203047728-172.17.0.2-1732247265153/current, will proceed with Du for space computation calculation,
2024-11-22T03:47:45,554 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1 2024-11-22T03:47:45,556 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f6b1a324a82d15b with lease ID 0x194eec49799dd184: Processing first storage report for DS-cffbd7c7-71b1-4ecd-bc18-87e72069e647 from datanode DatanodeRegistration(127.0.0.1:37439, datanodeUuid=c063b1cc-0a3f-4f90-a91d-4351bebe0e49, infoPort=37399, infoSecurePort=0, ipcPort=46709, storageInfo=lv=-57;cid=testClusterID;nsid=1172964791;c=1732247265153) 2024-11-22T03:47:45,557 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f6b1a324a82d15b with lease ID 0x194eec49799dd184: from storage DS-cffbd7c7-71b1-4ecd-bc18-87e72069e647 node DatanodeRegistration(127.0.0.1:37439, datanodeUuid=c063b1cc-0a3f-4f90-a91d-4351bebe0e49, infoPort=37399, infoSecurePort=0, ipcPort=46709, storageInfo=lv=-57;cid=testClusterID;nsid=1172964791;c=1732247265153), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:47:45,557 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f6b1a324a82d15b with lease ID 0x194eec49799dd184: Processing first storage report for DS-63eb955a-594a-48fa-a1b0-740a817a4262 from datanode DatanodeRegistration(127.0.0.1:37439, datanodeUuid=c063b1cc-0a3f-4f90-a91d-4351bebe0e49, infoPort=37399, infoSecurePort=0, ipcPort=46709, storageInfo=lv=-57;cid=testClusterID;nsid=1172964791;c=1732247265153) 2024-11-22T03:47:45,557 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f6b1a324a82d15b with lease ID 0x194eec49799dd184: from storage DS-63eb955a-594a-48fa-a1b0-740a817a4262 node DatanodeRegistration(127.0.0.1:37439, datanodeUuid=c063b1cc-0a3f-4f90-a91d-4351bebe0e49, infoPort=37399, infoSecurePort=0, ipcPort=46709, storageInfo=lv=-57;cid=testClusterID;nsid=1172964791;c=1732247265153), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:47:45,609 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ff103fb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/java.io.tmpdir/jetty-localhost-40161-hadoop-hdfs-3_4_1-tests_jar-_-any-1956939255136215992/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:47:45,610 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7012f44c{HTTP/1.1, (http/1.1)}{localhost:40161} 2024-11-22T03:47:45,610 INFO [Time-limited test {}] server.Server(415): Started @102936ms 2024-11-22T03:47:45,611 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
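The entries up to this point are HBaseTestingUtil bringing up an embedded HDFS for the test: a NameNode web UI on Jetty plus two DataNodes, whose first block reports are processed above. A minimal sketch of the kind of JUnit setup that produces this output, assuming the hbase-testing-util module's HBaseTestingUtil exposes the same startMiniCluster/shutdownMiniCluster calls as the older HBaseTestingUtility; the test class name below is made up, and the real run here presumably passes options for the two DataNodes:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterExampleTest {
      // One utility per test class; it owns the temporary test-data directory
      // (the 3ac1ad78-... path in the log) and the embedded HDFS, ZooKeeper and HBase.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        // Starts an in-process NameNode, DataNode(s), a MiniZooKeeperCluster and
        // one master plus one region server, emitting startup logs like those above.
        TEST_UTIL.startMiniCluster();
      }

      @AfterClass
      public static void tearDown() throws Exception {
        TEST_UTIL.shutdownMiniCluster();
      }
    }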
2024-11-22T03:47:45,675 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/cluster_3300ce87-5c35-b25a-98c3-4dc3d5c9cb4f/data/data3/current/BP-203047728-172.17.0.2-1732247265153/current, will proceed with Du for space computation calculation, 2024-11-22T03:47:45,676 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/cluster_3300ce87-5c35-b25a-98c3-4dc3d5c9cb4f/data/data4/current/BP-203047728-172.17.0.2-1732247265153/current, will proceed with Du for space computation calculation, 2024-11-22T03:47:45,692 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:47:45,695 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfadf416f512f0876 with lease ID 0x194eec49799dd185: Processing first storage report for DS-417e57de-9620-417f-bd2f-de0111668f15 from datanode DatanodeRegistration(127.0.0.1:33001, datanodeUuid=9e864513-21b1-4892-b6b1-c60b987b259d, infoPort=45925, infoSecurePort=0, ipcPort=46781, storageInfo=lv=-57;cid=testClusterID;nsid=1172964791;c=1732247265153) 2024-11-22T03:47:45,695 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfadf416f512f0876 with lease ID 0x194eec49799dd185: from storage DS-417e57de-9620-417f-bd2f-de0111668f15 node DatanodeRegistration(127.0.0.1:33001, datanodeUuid=9e864513-21b1-4892-b6b1-c60b987b259d, infoPort=45925, infoSecurePort=0, ipcPort=46781, storageInfo=lv=-57;cid=testClusterID;nsid=1172964791;c=1732247265153), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:47:45,695 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfadf416f512f0876 with lease ID 0x194eec49799dd185: Processing first storage report for DS-eb20fd01-58df-4b25-a47d-5e5d1ec8c587 from datanode DatanodeRegistration(127.0.0.1:33001, datanodeUuid=9e864513-21b1-4892-b6b1-c60b987b259d, infoPort=45925, infoSecurePort=0, ipcPort=46781, storageInfo=lv=-57;cid=testClusterID;nsid=1172964791;c=1732247265153) 2024-11-22T03:47:45,695 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfadf416f512f0876 with lease ID 0x194eec49799dd185: from storage DS-eb20fd01-58df-4b25-a47d-5e5d1ec8c587 node DatanodeRegistration(127.0.0.1:33001, datanodeUuid=9e864513-21b1-4892-b6b1-c60b987b259d, infoPort=45925, infoSecurePort=0, ipcPort=46781, storageInfo=lv=-57;cid=testClusterID;nsid=1172964791;c=1732247265153), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T03:47:45,742 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d 2024-11-22T03:47:45,748 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/cluster_3300ce87-5c35-b25a-98c3-4dc3d5c9cb4f/zookeeper_0, clientPort=49842, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/cluster_3300ce87-5c35-b25a-98c3-4dc3d5c9cb4f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/cluster_3300ce87-5c35-b25a-98c3-4dc3d5c9cb4f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T03:47:45,749 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49842 2024-11-22T03:47:45,750 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:45,751 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:45,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:47:45,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:47:45,764 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7 with version=8 2024-11-22T03:47:45,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/hbase-staging 2024-11-22T03:47:45,766 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c85114ed5096:0 server-side Connection retries=45 2024-11-22T03:47:45,766 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:47:45,766 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:47:45,767 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:47:45,767 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:47:45,767 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:47:45,767 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T03:47:45,767 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:47:45,768 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41971 2024-11-22T03:47:45,770 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41971 connecting to ZooKeeper ensemble=127.0.0.1:49842 2024-11-22T03:47:45,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:419710x0, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:47:45,774 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41971-0x100658afe410000 connected 2024-11-22T03:47:45,790 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:45,792 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:45,794 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:47:45,795 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7, hbase.cluster.distributed=false 2024-11-22T03:47:45,797 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:47:45,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41971 2024-11-22T03:47:45,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41971 2024-11-22T03:47:45,802 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41971 2024-11-22T03:47:45,803 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41971 2024-11-22T03:47:45,803 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41971 2024-11-22T03:47:45,820 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c85114ed5096:0 server-side Connection retries=45 2024-11-22T03:47:45,820 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:47:45,820 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:47:45,820 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:47:45,820 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:47:45,820 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:47:45,820 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:47:45,821 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:47:45,821 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45509 2024-11-22T03:47:45,823 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45509 connecting to ZooKeeper ensemble=127.0.0.1:49842 2024-11-22T03:47:45,824 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:45,826 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:45,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455090x0, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:47:45,831 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:455090x0, quorum=127.0.0.1:49842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:47:45,831 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45509-0x100658afe410001 connected 2024-11-22T03:47:45,831 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:47:45,833 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T03:47:45,834 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T03:47:45,835 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:47:45,836 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45509 2024-11-22T03:47:45,836 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45509 2024-11-22T03:47:45,837 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45509 2024-11-22T03:47:45,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45509 2024-11-22T03:47:45,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45509 2024-11-22T03:47:45,851 
DEBUG [M:0;c85114ed5096:41971 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c85114ed5096:41971 2024-11-22T03:47:45,852 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c85114ed5096,41971,1732247265766 2024-11-22T03:47:45,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:47:45,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:47:45,854 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c85114ed5096,41971,1732247265766 2024-11-22T03:47:45,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:45,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T03:47:45,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:45,859 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:47:45,859 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c85114ed5096,41971,1732247265766 from backup master directory 2024-11-22T03:47:45,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c85114ed5096,41971,1732247265766 2024-11-22T03:47:45,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:47:45,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:47:45,860 WARN [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
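What the master is doing here is plain ZooKeeper bookkeeping: it registers itself under /hbase/backup-masters, takes /hbase/master when it wins the race, then deletes its backup entry, and both the master and the region server observe the resulting watcher events. A hedged sketch of inspecting that state from outside with the stock ZooKeeper client; the quorum 127.0.0.1:49842 and the /hbase paths are copied from the log, and this is not HBase's internal ZKWatcher/ZKUtil API:

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MasterZNodeProbe {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Quorum string as reported by RecoverableZooKeeper above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49842", 30000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();

        // /hbase/master holds the active master; /hbase/backup-masters lists the others.
        Stat active = zk.exists("/hbase/master", false);
        List<String> backups = zk.getChildren("/hbase/backup-masters", false);
        System.out.println("active master znode present: " + (active != null));
        System.out.println("backup masters: " + backups);
        zk.close();
      }
    }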
2024-11-22T03:47:45,860 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c85114ed5096,41971,1732247265766 2024-11-22T03:47:45,868 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/hbase.id] with ID: 8943d8f0-091c-4805-9ebb-7337d0403fcd 2024-11-22T03:47:45,868 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/.tmp/hbase.id 2024-11-22T03:47:45,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:47:45,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:47:45,875 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/.tmp/hbase.id]:[hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/hbase.id] 2024-11-22T03:47:45,891 INFO [master/c85114ed5096:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:45,891 INFO [master/c85114ed5096:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T03:47:45,893 INFO [master/c85114ed5096:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
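The cluster ID bookkeeping above follows the usual HDFS idiom of writing to a temporary path and then renaming into place, so a reader of hbase.id never sees a partially written file. A minimal sketch of that pattern with the public Hadoop FileSystem API; the paths below are illustrative stand-ins for the real rootdir, and this is a sketch of the idiom, not the FSUtils source:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AtomicIdFileWrite {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:37489"); // NameNode address from the log
        FileSystem fs = FileSystem.get(conf);

        Path rootDir = new Path("/user/jenkins/test-data/example");
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");

        // 1. Write the ID to a temporary location first.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("8943d8f0-091c-4805-9ebb-7337d0403fcd".getBytes(StandardCharsets.UTF_8));
        }
        // 2. Rename it to its final name; readers see either the old file or the
        //    complete new one, never a half-written hbase.id.
        if (!fs.rename(tmp, target)) {
          throw new IOException("rename failed: " + tmp + " -> " + target);
        }
      }
    }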
2024-11-22T03:47:45,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:45,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:45,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:47:45,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:47:45,904 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:47:45,905 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T03:47:45,905 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:47:45,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:47:45,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:47:45,917 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store 2024-11-22T03:47:45,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:47:45,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:47:45,926 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:47:45,926 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:47:45,926 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:47:45,926 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:47:45,927 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:47:45,927 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:47:45,927 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
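The descriptor dumped above for master:store (families info, proc, rs and state, each with its bloom filter, block encoding, versions, in-memory flag and block size) is ordinary HBase schema metadata. A hedged sketch of declaring equivalent families through the public client API, reusing the 'info' and 'proc' settings from the log; the table name is illustrative, since master:store itself is an internal region that applications never create:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreSchemaExample {
      public static TableDescriptor exampleDescriptor() {
        // Mirrors the 'info' family in the log: 3 versions, ROWCOL bloom filter,
        // ROW_INDEX_V1 block encoding, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();

        // Mirrors the 'proc' family: a single version, ROW bloom filter,
        // default 64 KB blocks and no block encoding.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build();

        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }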
2024-11-22T03:47:45,927 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732247265926Disabling compacts and flushes for region at 1732247265926Disabling writes for close at 1732247265927 (+1 ms)Writing region close event to WAL at 1732247265927Closed at 1732247265927 2024-11-22T03:47:45,929 WARN [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/.initializing 2024-11-22T03:47:45,929 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/WALs/c85114ed5096,41971,1732247265766 2024-11-22T03:47:45,933 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C41971%2C1732247265766, suffix=, logDir=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/WALs/c85114ed5096,41971,1732247265766, archiveDir=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/oldWALs, maxLogs=10 2024-11-22T03:47:45,933 INFO [master/c85114ed5096:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C41971%2C1732247265766.1732247265933 2024-11-22T03:47:45,940 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/WALs/c85114ed5096,41971,1732247265766/c85114ed5096%2C41971%2C1732247265766.1732247265933 2024-11-22T03:47:45,940 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45925:45925),(127.0.0.1/127.0.0.1:37399:37399)] 2024-11-22T03:47:45,941 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:47:45,941 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:47:45,941 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:45,941 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:45,945 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:45,947 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T03:47:45,948 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:45,948 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:45,948 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:45,951 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T03:47:45,951 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:45,951 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:47:45,952 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:45,955 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T03:47:45,955 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:45,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:47:45,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:45,958 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T03:47:45,958 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:45,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:47:45,959 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:45,960 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:45,960 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:45,962 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:45,962 DEBUG [master/c85114ed5096:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:45,963 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T03:47:45,965 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:45,967 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:47:45,968 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775753, jitterRate=-0.0135793536901474}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T03:47:45,969 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732247265942Initializing all the Stores at 1732247265943 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247265943Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247265945 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247265945Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247265945Cleaning up temporary data from old regions at 1732247265962 (+17 ms)Region opened successfully at 1732247265969 (+7 ms) 2024-11-22T03:47:45,973 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T03:47:45,978 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@327ed59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c85114ed5096/172.17.0.2:0 2024-11-22T03:47:45,979 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T03:47:45,979 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T03:47:45,979 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T03:47:45,979 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T03:47:45,980 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T03:47:45,981 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T03:47:45,981 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T03:47:45,983 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T03:47:45,985 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T03:47:45,986 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T03:47:45,986 INFO [master/c85114ed5096:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T03:47:45,987 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T03:47:45,988 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T03:47:45,988 INFO [master/c85114ed5096:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T03:47:45,990 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T03:47:45,991 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T03:47:45,992 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T03:47:45,993 DEBUG 
[master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T03:47:45,995 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T03:47:45,996 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T03:47:45,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:47:45,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:47:45,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:45,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:45,998 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c85114ed5096,41971,1732247265766, sessionid=0x100658afe410000, setting cluster-up flag (Was=false) 2024-11-22T03:47:46,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:46,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:46,004 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T03:47:46,005 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c85114ed5096,41971,1732247265766 2024-11-22T03:47:46,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:46,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:46,011 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T03:47:46,012 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c85114ed5096,41971,1732247265766 2024-11-22T03:47:46,013 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T03:47:46,015 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T03:47:46,016 INFO [master/c85114ed5096:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T03:47:46,016 INFO [master/c85114ed5096:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T03:47:46,016 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c85114ed5096,41971,1732247265766 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T03:47:46,018 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:47:46,018 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:47:46,018 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:47:46,018 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:47:46,018 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c85114ed5096:0, corePoolSize=10, maxPoolSize=10 2024-11-22T03:47:46,018 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:46,018 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c85114ed5096:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:47:46,018 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c85114ed5096:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T03:47:46,019 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732247296019 2024-11-22T03:47:46,019 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T03:47:46,019 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T03:47:46,019 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T03:47:46,019 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T03:47:46,019 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T03:47:46,019 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T03:47:46,020 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,020 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T03:47:46,020 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T03:47:46,020 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T03:47:46,021 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T03:47:46,021 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T03:47:46,021 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:47:46,021 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T03:47:46,021 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247266021,5,FailOnTimeoutGroup] 2024-11-22T03:47:46,021 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247266021,5,FailOnTimeoutGroup] 2024-11-22T03:47:46,021 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,021 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T03:47:46,021 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,021 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,022 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:46,022 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T03:47:46,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:47:46,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:47:46,036 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T03:47:46,037 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7 2024-11-22T03:47:46,042 INFO [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer(746): ClusterId : 8943d8f0-091c-4805-9ebb-7337d0403fcd 2024-11-22T03:47:46,042 DEBUG [RS:0;c85114ed5096:45509 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:47:46,044 DEBUG [RS:0;c85114ed5096:45509 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:47:46,044 DEBUG [RS:0;c85114ed5096:45509 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:47:46,046 DEBUG [RS:0;c85114ed5096:45509 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:47:46,047 DEBUG [RS:0;c85114ed5096:45509 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c62983a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c85114ed5096/172.17.0.2:0 2024-11-22T03:47:46,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:47:46,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:47:46,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:47:46,057 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:47:46,060 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:47:46,060 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:46,060 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:46,061 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:47:46,063 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:47:46,063 DEBUG [RS:0;c85114ed5096:45509 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c85114ed5096:45509 2024-11-22T03:47:46,063 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:46,063 INFO [RS:0;c85114ed5096:45509 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:47:46,063 INFO [RS:0;c85114ed5096:45509 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:47:46,063 DEBUG [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer(832): About to register with Master. 
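For reference, the ZooKeeper events logged earlier in this startup sequence (NodeCreated on /hbase/running, NodeChildrenChanged on /hbase) come from one-shot ZooKeeper watches that ZKWatcher keeps re-registering. The following is a minimal illustrative sketch using the plain org.apache.zookeeper client against the quorum shown in the log (127.0.0.1:49842); the class name and flow are assumptions for illustration and this is not HBase's internal ZKWatcher.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatch {
        public static void main(String[] args) throws Exception {
            CountDownLatch created = new CountDownLatch(1);
            // Quorum and znode path taken from the log; everything else is illustrative.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:49842", 30_000, event -> {
                // ZooKeeper watches are one-shot: they fire once and must be re-set,
                // which is why the log shows the same paths being watched repeatedly.
                if (event.getType() == Watcher.Event.EventType.NodeCreated
                        && "/hbase/running".equals(event.getPath())) {
                    created.countDown();
                }
            });
            // exists(path, true) registers the default watcher for create/delete of the znode.
            zk.exists("/hbase/running", true);
            created.await();
            System.out.println("/hbase/running created -> cluster reported as up");
            zk.close();
        }
    }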
2024-11-22T03:47:46,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:46,064 INFO [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer(2659): reportForDuty to master=c85114ed5096,41971,1732247265766 with port=45509, startcode=1732247265819 2024-11-22T03:47:46,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:47:46,064 DEBUG [RS:0;c85114ed5096:45509 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:47:46,066 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:47:46,066 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:46,067 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:46,067 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57065, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:47:46,067 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:47:46,068 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41971 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c85114ed5096,45509,1732247265819 2024-11-22T03:47:46,068 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41971 {}] master.ServerManager(517): Registering regionserver=c85114ed5096,45509,1732247265819 2024-11-22T03:47:46,069 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:47:46,069 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:46,070 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:46,070 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:47:46,071 DEBUG [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7 2024-11-22T03:47:46,071 DEBUG [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37489 2024-11-22T03:47:46,071 DEBUG [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:47:46,072 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/data/hbase/meta/1588230740 2024-11-22T03:47:46,072 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/data/hbase/meta/1588230740 2024-11-22T03:47:46,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:47:46,073 DEBUG [RS:0;c85114ed5096:45509 {}] zookeeper.ZKUtil(111): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c85114ed5096,45509,1732247265819 2024-11-22T03:47:46,073 WARN [RS:0;c85114ed5096:45509 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:47:46,074 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:47:46,074 INFO [RS:0;c85114ed5096:45509 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:47:46,074 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:47:46,074 DEBUG [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/WALs/c85114ed5096,45509,1732247265819 2024-11-22T03:47:46,074 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T03:47:46,075 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:47:46,078 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c85114ed5096,45509,1732247265819] 2024-11-22T03:47:46,078 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:47:46,079 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=697487, jitterRate=-0.11310052871704102}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:47:46,080 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732247266053Initializing all the Stores at 1732247266055 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247266055Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247266057 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247266057Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247266057Cleaning up temporary data from old regions at 1732247266074 (+17 ms)Region opened successfully at 1732247266080 (+6 ms) 2024-11-22T03:47:46,080 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:47:46,081 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:47:46,081 INFO [RS:0;c85114ed5096:45509 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:47:46,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:47:46,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:47:46,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:47:46,081 INFO 
[PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:47:46,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732247266080Disabling compacts and flushes for region at 1732247266080Disabling writes for close at 1732247266081 (+1 ms)Writing region close event to WAL at 1732247266081Closed at 1732247266081 2024-11-22T03:47:46,083 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:47:46,083 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T03:47:46,083 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T03:47:46,084 INFO [RS:0;c85114ed5096:45509 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:47:46,085 INFO [RS:0;c85114ed5096:45509 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:47:46,085 INFO [RS:0;c85114ed5096:45509 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,085 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:47:46,085 INFO [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:47:46,087 INFO [RS:0;c85114ed5096:45509 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:47:46,087 INFO [RS:0;c85114ed5096:45509 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
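The MemStoreFlusher line above ("globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false") is consistent with a low-water mark at 95% of the global limit, which is the usual default for hbase.regionserver.global.memstore.size.lower.limit. A tiny arithmetic check; treating 0.95 as this test's lower-limit fraction is an assumption inferred from the two numbers, not read from the configuration.

    public class MemStoreLimitCheck {
        public static void main(String[] args) {
            long mb = 1024L * 1024L;
            long globalLimit = 880 * mb;        // from the MemStoreFlusher log line
            double lowerLimitFraction = 0.95;   // assumed lower-limit fraction for this test
            long lowMark = (long) (globalLimit * lowerLimitFraction);
            System.out.println("low mark = " + (lowMark / mb) + " M"); // prints 836 M, as logged
        }
    }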
2024-11-22T03:47:46,087 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T03:47:46,087 DEBUG [RS:0;c85114ed5096:45509 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:46,087 DEBUG [RS:0;c85114ed5096:45509 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:46,087 DEBUG [RS:0;c85114ed5096:45509 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:46,087 DEBUG [RS:0;c85114ed5096:45509 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:46,087 DEBUG [RS:0;c85114ed5096:45509 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:46,087 DEBUG [RS:0;c85114ed5096:45509 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c85114ed5096:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:47:46,087 DEBUG [RS:0;c85114ed5096:45509 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:46,087 DEBUG [RS:0;c85114ed5096:45509 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:46,088 DEBUG [RS:0;c85114ed5096:45509 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:46,088 DEBUG [RS:0;c85114ed5096:45509 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:46,088 DEBUG [RS:0;c85114ed5096:45509 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:46,088 DEBUG [RS:0;c85114ed5096:45509 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:46,088 DEBUG [RS:0;c85114ed5096:45509 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:47:46,088 DEBUG [RS:0;c85114ed5096:45509 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:47:46,093 INFO [RS:0;c85114ed5096:45509 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,093 INFO [RS:0;c85114ed5096:45509 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
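Each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" line above is a named thread pool being created. A JDK-only sketch of the same shape (core equal to max, unbounded work queue); the name and sizes are copied from the RS_OPEN_REGION line, while the helper itself is illustrative and not HBase's ExecutorService wrapper.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class NamedPools {
        static ThreadPoolExecutor newPool(String name, int corePoolSize, int maxPoolSize) {
            AtomicInteger seq = new AtomicInteger();
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                corePoolSize, maxPoolSize, 60, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),                           // unbounded work queue
                r -> new Thread(r, name + "-" + seq.incrementAndGet()));
            System.out.println("Starting executor service name=" + name
                + ", corePoolSize=" + corePoolSize + ", maxPoolSize=" + maxPoolSize);
            return pool;
        }

        public static void main(String[] args) {
            // Pool sizes taken from the log; the pool itself is just a JDK ThreadPoolExecutor.
            ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1, 1);
            openRegion.execute(() -> System.out.println("open-region task running"));
            openRegion.shutdown();
        }
    }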
2024-11-22T03:47:46,093 INFO [RS:0;c85114ed5096:45509 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,093 INFO [RS:0;c85114ed5096:45509 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,093 INFO [RS:0;c85114ed5096:45509 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,093 INFO [RS:0;c85114ed5096:45509 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,45509,1732247265819-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:47:46,108 INFO [RS:0;c85114ed5096:45509 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:47:46,108 INFO [RS:0;c85114ed5096:45509 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,45509,1732247265819-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,108 INFO [RS:0;c85114ed5096:45509 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,109 INFO [RS:0;c85114ed5096:45509 {}] regionserver.Replication(171): c85114ed5096,45509,1732247265819 started 2024-11-22T03:47:46,122 INFO [RS:0;c85114ed5096:45509 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,122 INFO [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer(1482): Serving as c85114ed5096,45509,1732247265819, RpcServer on c85114ed5096/172.17.0.2:45509, sessionid=0x100658afe410001 2024-11-22T03:47:46,122 DEBUG [RS:0;c85114ed5096:45509 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:47:46,122 DEBUG [RS:0;c85114ed5096:45509 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c85114ed5096,45509,1732247265819 2024-11-22T03:47:46,122 DEBUG [RS:0;c85114ed5096:45509 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,45509,1732247265819' 2024-11-22T03:47:46,122 DEBUG [RS:0;c85114ed5096:45509 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:47:46,123 DEBUG [RS:0;c85114ed5096:45509 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:47:46,124 DEBUG [RS:0;c85114ed5096:45509 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:47:46,124 DEBUG [RS:0;c85114ed5096:45509 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:47:46,124 DEBUG [RS:0;c85114ed5096:45509 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c85114ed5096,45509,1732247265819 2024-11-22T03:47:46,124 DEBUG [RS:0;c85114ed5096:45509 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,45509,1732247265819' 2024-11-22T03:47:46,124 DEBUG [RS:0;c85114ed5096:45509 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:47:46,124 DEBUG [RS:0;c85114ed5096:45509 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:47:46,125 DEBUG 
[RS:0;c85114ed5096:45509 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:47:46,125 INFO [RS:0;c85114ed5096:45509 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:47:46,125 INFO [RS:0;c85114ed5096:45509 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T03:47:46,231 INFO [RS:0;c85114ed5096:45509 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C45509%2C1732247265819, suffix=, logDir=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/WALs/c85114ed5096,45509,1732247265819, archiveDir=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/oldWALs, maxLogs=32 2024-11-22T03:47:46,237 WARN [c85114ed5096:41971 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T03:47:46,237 INFO [RS:0;c85114ed5096:45509 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C45509%2C1732247265819.1732247266237 2024-11-22T03:47:46,244 INFO [RS:0;c85114ed5096:45509 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/WALs/c85114ed5096,45509,1732247265819/c85114ed5096%2C45509%2C1732247265819.1732247266237 2024-11-22T03:47:46,245 DEBUG [RS:0;c85114ed5096:45509 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37399:37399),(127.0.0.1/127.0.0.1:45925:45925)] 2024-11-22T03:47:46,487 DEBUG [c85114ed5096:41971 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T03:47:46,489 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c85114ed5096,45509,1732247265819 2024-11-22T03:47:46,493 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c85114ed5096,45509,1732247265819, state=OPENING 2024-11-22T03:47:46,496 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T03:47:46,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:46,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:46,499 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:47:46,499 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c85114ed5096,45509,1732247265819}] 2024-11-22T03:47:46,499 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:47:46,499 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path 
/hbase/meta-region-server: CHANGED 2024-11-22T03:47:46,655 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T03:47:46,661 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51249, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T03:47:46,666 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T03:47:46,667 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:47:46,669 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C45509%2C1732247265819.meta, suffix=.meta, logDir=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/WALs/c85114ed5096,45509,1732247265819, archiveDir=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/oldWALs, maxLogs=32 2024-11-22T03:47:46,671 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C45509%2C1732247265819.meta.1732247266671.meta 2024-11-22T03:47:46,677 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/WALs/c85114ed5096,45509,1732247265819/c85114ed5096%2C45509%2C1732247265819.meta.1732247266671.meta 2024-11-22T03:47:46,680 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37399:37399),(127.0.0.1/127.0.0.1:45925:45925)] 2024-11-22T03:47:46,682 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:47:46,682 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T03:47:46,682 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T03:47:46,682 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
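The new WAL logged above (WALs/c85114ed5096,45509,1732247265819/c85114ed5096%2C45509%2C1732247265819.1732247266237) is named from the server name with commas escaped, plus the creation timestamp as a suffix. The sketch below reproduces that file name with plain URL-encoding; treating the escaping as URL-encoding is an assumption, but it matches the %2C sequences in the log.

    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;

    public class WalFileName {
        public static void main(String[] args) {
            String serverName = "c85114ed5096,45509,1732247265819"; // host,port,startcode from the log
            long creationTs = 1732247266237L;                        // timestamp suffix from the log
            String prefix = URLEncoder.encode(serverName, StandardCharsets.UTF_8);
            // Prints c85114ed5096%2C45509%2C1732247265819.1732247266237, matching the logged WAL name.
            System.out.println(prefix + "." + creationTs);
        }
    }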
2024-11-22T03:47:46,683 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T03:47:46,683 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:47:46,683 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T03:47:46,683 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T03:47:46,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:47:46,686 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:47:46,686 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:46,686 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:46,687 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:47:46,688 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:47:46,688 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:46,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:46,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:47:46,689 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:47:46,689 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:46,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:46,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:47:46,691 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:47:46,691 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:46,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
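The CompactionConfiguration lines above repeat "ratio 1.200000" for every column family; in ratio-based selection a set of store files is generally considered "in ratio" when each file is no larger than the combined size of the other files times that ratio. The helper below is a paraphrase of that rule for illustration only, not HBase's ExploringCompactionPolicy code.

    import java.util.List;

    public class FilesInRatio {
        // True if every file is <= (sum of the other files) * ratio.
        static boolean inRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            double ratio = 1.2; // "ratio 1.200000" from the CompactionConfiguration lines
            System.out.println(inRatio(List.of(10L, 12L, 11L), ratio));  // similar sizes -> true
            System.out.println(inRatio(List.of(10L, 12L, 500L), ratio)); // one outsized file -> false
        }
    }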
2024-11-22T03:47:46,692 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:47:46,693 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/data/hbase/meta/1588230740 2024-11-22T03:47:46,694 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/data/hbase/meta/1588230740 2024-11-22T03:47:46,696 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:47:46,696 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:47:46,697 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:47:46,698 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:47:46,699 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=789809, jitterRate=0.004295095801353455}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:47:46,699 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T03:47:46,700 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732247266683Writing region info on filesystem at 1732247266683Initializing all the Stores at 1732247266684 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247266684Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247266685 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247266685Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247266685Cleaning up temporary data from old regions at 1732247266696 (+11 ms)Running coprocessor post-open hooks at 1732247266700 (+4 ms)Region opened successfully at 1732247266700 2024-11-22T03:47:46,702 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732247266654 2024-11-22T03:47:46,705 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T03:47:46,705 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T03:47:46,706 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c85114ed5096,45509,1732247265819 2024-11-22T03:47:46,707 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c85114ed5096,45509,1732247265819, state=OPEN 2024-11-22T03:47:46,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:47:46,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:47:46,709 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c85114ed5096,45509,1732247265819 2024-11-22T03:47:46,709 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:47:46,709 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:47:46,712 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T03:47:46,712 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c85114ed5096,45509,1732247265819 in 210 msec 2024-11-22T03:47:46,716 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T03:47:46,716 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 629 msec 2024-11-22T03:47:46,717 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:47:46,717 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T03:47:46,718 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:47:46,718 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c85114ed5096,45509,1732247265819, seqNum=-1] 2024-11-22T03:47:46,719 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:47:46,720 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42387, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:47:46,727 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 710 msec 2024-11-22T03:47:46,727 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732247266727, completionTime=-1 2024-11-22T03:47:46,727 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T03:47:46,727 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T03:47:46,729 INFO [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T03:47:46,729 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732247326729 2024-11-22T03:47:46,729 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732247386729 2024-11-22T03:47:46,729 INFO [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T03:47:46,730 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,41971,1732247265766-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,730 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,41971,1732247265766-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,730 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,41971,1732247265766-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,730 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c85114ed5096:41971, period=300000, unit=MILLISECONDS is enabled. 
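The two "Opened 1588230740" lines (desiredMaxFileSize=697487 with jitterRate=-0.1131... earlier, and desiredMaxFileSize=789809 with jitterRate=0.00429... above) are both consistent with a base region max file size of 786432 bytes (768 KB) and a multiplicative jitter. Both the base value and the exact formula below are assumptions inferred from the logged numbers, not read from the test configuration.

    public class SplitSizeJitter {
        static long jittered(long baseMaxFileSize, double jitterRate) {
            // Assumed form: desiredMaxFileSize = base + base * jitterRate, truncated to a long.
            return baseMaxFileSize + (long) (baseMaxFileSize * jitterRate);
        }

        public static void main(String[] args) {
            long base = 786_432L; // 768 KB, inferred from the two logged values
            System.out.println(jittered(base, -0.11310052871704102)); // 697487, as logged earlier
            System.out.println(jittered(base, 0.004295095801353455)); // 789809, as logged above
        }
    }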
2024-11-22T03:47:46,730 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,730 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:46,732 DEBUG [master/c85114ed5096:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T03:47:46,735 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.874sec 2024-11-22T03:47:46,735 INFO [master/c85114ed5096:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T03:47:46,735 INFO [master/c85114ed5096:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T03:47:46,736 INFO [master/c85114ed5096:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T03:47:46,736 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T03:47:46,736 INFO [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T03:47:46,736 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,41971,1732247265766-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:47:46,736 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,41971,1732247265766-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T03:47:46,738 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T03:47:46,738 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T03:47:46,739 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,41971,1732247265766-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
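The block of "Chore ScheduledChore name=..., period=..., unit=... is enabled." lines records the master registering its periodic background tasks (balancer, normalizer, catalog janitor, MOB cleaners, and so on) with its ChoreService. A rough sketch of that mechanism follows; it assumes the ScheduledChore(name, stopper, period, initialDelay, unit) constructor and ChoreService.scheduleChore(...) seen in this code base, which are internal APIs whose exact signatures can differ between releases:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws Exception {
        // Minimal Stoppable; in the log above the stopper is the master itself.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };

        ChoreService choreService = new ChoreService("sketch");

        // Runs every 60s with no initial delay, like the ClusterStatusChore above.
        ScheduledChore chore =
            new ScheduledChore("ExampleChore", stopper, 60_000, 0, TimeUnit.MILLISECONDS) {
              @Override
              protected void chore() {
                // periodic work goes here
              }
            };

        // Scheduling is what produces the "Chore ScheduledChore ... is enabled." lines.
        choreService.scheduleChore(chore);

        TimeUnit.SECONDS.sleep(1);
        choreService.shutdown();
      }
    }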
2024-11-22T03:47:46,743 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47a55394, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:47:46,743 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c85114ed5096,41971,-1 for getting cluster id 2024-11-22T03:47:46,743 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T03:47:46,745 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8943d8f0-091c-4805-9ebb-7337d0403fcd' 2024-11-22T03:47:46,745 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T03:47:46,745 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8943d8f0-091c-4805-9ebb-7337d0403fcd" 2024-11-22T03:47:46,746 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b11094c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:47:46,746 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c85114ed5096,41971,-1] 2024-11-22T03:47:46,746 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T03:47:46,747 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:47:46,748 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52340, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T03:47:46,749 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1899f674, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:47:46,750 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:47:46,751 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c85114ed5096,45509,1732247265819, seqNum=-1] 2024-11-22T03:47:46,751 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:47:46,753 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34178, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:47:46,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c85114ed5096,41971,1732247265766 2024-11-22T03:47:46,756 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:46,759 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T03:47:46,760 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T03:47:46,760 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T03:47:46,760 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:47:46,761 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:47:46,761 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:47:46,761 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T03:47:46,761 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T03:47:46,761 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=970899921, stopped=false 2024-11-22T03:47:46,761 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c85114ed5096,41971,1732247265766 2024-11-22T03:47:46,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:47:46,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:46,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:47:46,762 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:47:46,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:46,762 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
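The call stacks above show where this shutdown comes from: AbstractTestLogRolling.tearDown() calls HBaseTestingUtil.shutdownMiniCluster(), which closes the shared async connection and then stops the single-process HBase cluster; further down, the same utility brings up a fresh minicluster described by StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, ...}. A minimal JUnit 4 sketch of that lifecycle, assuming the HBaseTestingUtil and StartMiniClusterOption APIs referenced in this log (the class and test body are illustrative, not the actual TestLogRolling code):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {

      // Backs the "Starting up minicluster ..." / "Minicluster is down" messages in this log
      // (the class is named HBaseTestingUtility in older HBase releases).
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Same shape as the option printed later in this log: 1 master, 1 RS, 2 datanodes.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .build();
        util.startMiniCluster(option);
      }

      @After
      public void tearDown() throws Exception {
        // The call visible in the stack trace above.
        util.shutdownMiniCluster();
      }

      @Test
      public void testAgainstTheMiniCluster() throws Exception {
        // Test body elided; HBase, HDFS and ZooKeeper are all up at this point.
      }
    }

shutdownMiniCluster() also tears down the embedded HDFS and ZooKeeper clusters, which is what the datanode and MiniZK shutdown messages further down correspond to.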
2024-11-22T03:47:46,763 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:47:46,763 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:47:46,763 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:47:46,763 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c85114ed5096,45509,1732247265819' ***** 2024-11-22T03:47:46,763 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T03:47:46,763 INFO [RS:0;c85114ed5096:45509 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T03:47:46,763 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:47:46,763 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T03:47:46,764 INFO [RS:0;c85114ed5096:45509 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T03:47:46,764 INFO [RS:0;c85114ed5096:45509 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T03:47:46,764 INFO [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer(959): stopping server c85114ed5096,45509,1732247265819 2024-11-22T03:47:46,764 INFO [RS:0;c85114ed5096:45509 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:47:46,764 INFO [RS:0;c85114ed5096:45509 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c85114ed5096:45509. 2024-11-22T03:47:46,764 DEBUG [RS:0;c85114ed5096:45509 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:47:46,764 DEBUG [RS:0;c85114ed5096:45509 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:47:46,764 INFO [RS:0;c85114ed5096:45509 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-22T03:47:46,764 INFO [RS:0;c85114ed5096:45509 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T03:47:46,764 INFO [RS:0;c85114ed5096:45509 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T03:47:46,764 INFO [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T03:47:46,765 INFO [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-22T03:47:46,765 DEBUG [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-22T03:47:46,765 DEBUG [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-22T03:47:46,765 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:47:46,765 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:47:46,765 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:47:46,765 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:47:46,765 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:47:46,765 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-22T03:47:46,783 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/data/hbase/meta/1588230740/.tmp/ns/6d022a9936e041e2b76f78327bdcadf7 is 43, key is default/ns:d/1732247266721/Put/seqid=0 2024-11-22T03:47:46,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741835_1011 (size=5153) 2024-11-22T03:47:46,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741835_1011 (size=5153) 2024-11-22T03:47:46,790 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/data/hbase/meta/1588230740/.tmp/ns/6d022a9936e041e2b76f78327bdcadf7 2024-11-22T03:47:46,800 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/data/hbase/meta/1588230740/.tmp/ns/6d022a9936e041e2b76f78327bdcadf7 as hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/data/hbase/meta/1588230740/ns/6d022a9936e041e2b76f78327bdcadf7 2024-11-22T03:47:46,809 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/data/hbase/meta/1588230740/ns/6d022a9936e041e2b76f78327bdcadf7, entries=2, sequenceid=6, filesize=5.0 K 2024-11-22T03:47:46,810 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 45ms, sequenceid=6, compaction requested=false 2024-11-22T03:47:46,811 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T03:47:46,817 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T03:47:46,818 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:47:46,818 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:47:46,819 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732247266765Running coprocessor pre-close hooks at 1732247266765Disabling compacts and flushes for region at 1732247266765Disabling writes for close at 1732247266765Obtaining lock to block concurrent updates at 1732247266765Preparing flush snapshotting stores in 1588230740 at 1732247266765Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732247266766 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732247266767 (+1 ms)Flushing 1588230740/ns: creating writer at 1732247266767Flushing 1588230740/ns: appending metadata at 1732247266783 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732247266783Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76c132bc: reopening flushed file at 1732247266798 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 45ms, sequenceid=6, compaction requested=false at 1732247266811 (+13 ms)Writing region close event to WAL at 1732247266812 (+1 ms)Running coprocessor post-close hooks at 1732247266818 (+6 ms)Closed at 1732247266818 2024-11-22T03:47:46,819 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T03:47:46,965 INFO [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer(976): stopping server c85114ed5096,45509,1732247265819; all regions closed. 
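In the close sequence above, the hbase:meta memstore (74 B, 2 cells) is flushed to a temporary HFile under .tmp/ns and then committed into the region's ns column-family directory before the region closes. The same memstore-to-HFile flush can also be requested explicitly through the client Admin API; a small sketch under that assumption (connection setup and class name are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the hosting region server to flush hbase:meta's memstores to HFiles,
          // the same write path the region close above exercises automatically.
          admin.flush(TableName.META_TABLE_NAME);
        }
      }
    }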
2024-11-22T03:47:46,966 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:46,966 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:46,966 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:46,966 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:46,966 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:46,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741834_1010 (size=1152) 2024-11-22T03:47:46,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741834_1010 (size=1152) 2024-11-22T03:47:46,972 DEBUG [RS:0;c85114ed5096:45509 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/oldWALs 2024-11-22T03:47:46,972 INFO [RS:0;c85114ed5096:45509 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c85114ed5096%2C45509%2C1732247265819.meta:.meta(num 1732247266671) 2024-11-22T03:47:46,972 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:46,972 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:46,972 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:46,973 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:46,973 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:46,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741833_1009 (size=93) 2024-11-22T03:47:46,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741833_1009 (size=93) 2024-11-22T03:47:46,978 DEBUG [RS:0;c85114ed5096:45509 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/oldWALs 2024-11-22T03:47:46,978 INFO [RS:0;c85114ed5096:45509 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c85114ed5096%2C45509%2C1732247265819:(num 1732247266237) 2024-11-22T03:47:46,978 DEBUG [RS:0;c85114ed5096:45509 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:47:46,978 INFO [RS:0;c85114ed5096:45509 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:47:46,978 INFO [RS:0;c85114ed5096:45509 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:47:46,978 INFO [RS:0;c85114ed5096:45509 {}] hbase.ChoreService(370): Chore service for: regionserver/c85114ed5096:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T03:47:46,978 INFO [RS:0;c85114ed5096:45509 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:47:46,978 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
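The entries above show both of the region server's FSHLog WALs (the .meta WAL and the default WAL) being closed and their files archived to the oldWALs directory during shutdown. WAL rolling, which this test class (TestLogRolling) exercises, can also be requested from a client so that the current WAL file is closed and becomes eligible for archiving; a sketch using the Admin API (connection setup is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WalRollSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Roll the WAL writer on every live region server; each roll closes the
          // current WAL file so the cleaner can later move it to oldWALs.
          for (ServerName rs : admin.getRegionServers()) {
            admin.rollWALWriter(rs);
          }
        }
      }
    }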
2024-11-22T03:47:46,978 INFO [RS:0;c85114ed5096:45509 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45509 2024-11-22T03:47:46,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:47:46,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c85114ed5096,45509,1732247265819 2024-11-22T03:47:46,980 INFO [RS:0;c85114ed5096:45509 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:47:46,980 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c85114ed5096,45509,1732247265819] 2024-11-22T03:47:46,981 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c85114ed5096,45509,1732247265819 already deleted, retry=false 2024-11-22T03:47:46,981 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c85114ed5096,45509,1732247265819 expired; onlineServers=0 2024-11-22T03:47:46,981 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c85114ed5096,41971,1732247265766' ***** 2024-11-22T03:47:46,981 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T03:47:46,981 INFO [M:0;c85114ed5096:41971 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:47:46,981 INFO [M:0;c85114ed5096:41971 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:47:46,982 DEBUG [M:0;c85114ed5096:41971 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T03:47:46,982 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T03:47:46,982 DEBUG [M:0;c85114ed5096:41971 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T03:47:46,982 DEBUG [master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247266021 {}] cleaner.HFileCleaner(306): Exit Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247266021,5,FailOnTimeoutGroup] 2024-11-22T03:47:46,982 DEBUG [master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247266021 {}] cleaner.HFileCleaner(306): Exit Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247266021,5,FailOnTimeoutGroup] 2024-11-22T03:47:46,982 INFO [M:0;c85114ed5096:41971 {}] hbase.ChoreService(370): Chore service for: master/c85114ed5096:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T03:47:46,982 INFO [M:0;c85114ed5096:41971 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:47:46,982 DEBUG [M:0;c85114ed5096:41971 {}] master.HMaster(1795): Stopping service threads 2024-11-22T03:47:46,982 INFO [M:0;c85114ed5096:41971 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T03:47:46,982 INFO [M:0;c85114ed5096:41971 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:47:46,982 INFO [M:0;c85114ed5096:41971 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T03:47:46,983 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T03:47:46,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T03:47:46,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:46,983 DEBUG [M:0;c85114ed5096:41971 {}] zookeeper.ZKUtil(347): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T03:47:46,983 WARN [M:0;c85114ed5096:41971 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T03:47:46,984 INFO [M:0;c85114ed5096:41971 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/.lastflushedseqids 2024-11-22T03:47:46,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741836_1012 (size=99) 2024-11-22T03:47:46,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741836_1012 (size=99) 2024-11-22T03:47:46,990 INFO [M:0;c85114ed5096:41971 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T03:47:46,990 INFO [M:0;c85114ed5096:41971 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T03:47:46,990 DEBUG [M:0;c85114ed5096:41971 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:47:46,990 INFO [M:0;c85114ed5096:41971 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:47:46,990 DEBUG [M:0;c85114ed5096:41971 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:47:46,990 DEBUG [M:0;c85114ed5096:41971 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:47:46,990 DEBUG [M:0;c85114ed5096:41971 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:47:46,991 INFO [M:0;c85114ed5096:41971 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-22T03:47:47,006 DEBUG [M:0;c85114ed5096:41971 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/36e801b2e8cf45c683aab31e50bd3d41 is 82, key is hbase:meta,,1/info:regioninfo/1732247266706/Put/seqid=0 2024-11-22T03:47:47,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741837_1013 (size=5672) 2024-11-22T03:47:47,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741837_1013 (size=5672) 2024-11-22T03:47:47,012 INFO [M:0;c85114ed5096:41971 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/36e801b2e8cf45c683aab31e50bd3d41 2024-11-22T03:47:47,033 DEBUG [M:0;c85114ed5096:41971 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/68eb34eeef3a4bd7b924b9db47e69bbc is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732247266726/Put/seqid=0 2024-11-22T03:47:47,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741838_1014 (size=5275) 2024-11-22T03:47:47,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741838_1014 (size=5275) 2024-11-22T03:47:47,039 INFO [M:0;c85114ed5096:41971 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/68eb34eeef3a4bd7b924b9db47e69bbc 2024-11-22T03:47:47,061 DEBUG [M:0;c85114ed5096:41971 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/33406de2194546028f964526f7f3f7bf is 69, key is c85114ed5096,45509,1732247265819/rs:state/1732247266069/Put/seqid=0 2024-11-22T03:47:47,066 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741839_1015 (size=5156) 2024-11-22T03:47:47,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741839_1015 (size=5156) 2024-11-22T03:47:47,067 INFO [M:0;c85114ed5096:41971 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/33406de2194546028f964526f7f3f7bf 2024-11-22T03:47:47,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:47:47,081 INFO [RS:0;c85114ed5096:45509 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:47:47,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45509-0x100658afe410001, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:47:47,081 INFO [RS:0;c85114ed5096:45509 {}] regionserver.HRegionServer(1031): Exiting; stopping=c85114ed5096,45509,1732247265819; zookeeper connection closed. 2024-11-22T03:47:47,081 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4cfdca90 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4cfdca90 2024-11-22T03:47:47,082 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T03:47:47,089 DEBUG [M:0;c85114ed5096:41971 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cde9f542aede45229af5eafc89ec5cb0 is 52, key is load_balancer_on/state:d/1732247266758/Put/seqid=0 2024-11-22T03:47:47,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741840_1016 (size=5056) 2024-11-22T03:47:47,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741840_1016 (size=5056) 2024-11-22T03:47:47,095 INFO [M:0;c85114ed5096:41971 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cde9f542aede45229af5eafc89ec5cb0 2024-11-22T03:47:47,102 DEBUG [M:0;c85114ed5096:41971 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/36e801b2e8cf45c683aab31e50bd3d41 as hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/36e801b2e8cf45c683aab31e50bd3d41 2024-11-22T03:47:47,109 INFO [M:0;c85114ed5096:41971 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/36e801b2e8cf45c683aab31e50bd3d41, entries=8, sequenceid=29, filesize=5.5 K 2024-11-22T03:47:47,111 DEBUG [M:0;c85114ed5096:41971 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/68eb34eeef3a4bd7b924b9db47e69bbc as hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/68eb34eeef3a4bd7b924b9db47e69bbc 2024-11-22T03:47:47,118 INFO [M:0;c85114ed5096:41971 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/68eb34eeef3a4bd7b924b9db47e69bbc, entries=3, sequenceid=29, filesize=5.2 K 2024-11-22T03:47:47,120 DEBUG [M:0;c85114ed5096:41971 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/33406de2194546028f964526f7f3f7bf as hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/33406de2194546028f964526f7f3f7bf 2024-11-22T03:47:47,127 INFO [M:0;c85114ed5096:41971 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/33406de2194546028f964526f7f3f7bf, entries=1, sequenceid=29, filesize=5.0 K 2024-11-22T03:47:47,128 DEBUG [M:0;c85114ed5096:41971 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cde9f542aede45229af5eafc89ec5cb0 as hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/cde9f542aede45229af5eafc89ec5cb0 2024-11-22T03:47:47,135 INFO [M:0;c85114ed5096:41971 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37489/user/jenkins/test-data/3d4511c1-377a-055e-cf24-b2e56fbbc2b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/cde9f542aede45229af5eafc89ec5cb0, entries=1, sequenceid=29, filesize=4.9 K 2024-11-22T03:47:47,137 INFO [M:0;c85114ed5096:41971 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=29, compaction requested=false 2024-11-22T03:47:47,139 INFO [M:0;c85114ed5096:41971 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:47:47,139 DEBUG [M:0;c85114ed5096:41971 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732247266990Disabling compacts and flushes for region at 1732247266990Disabling writes for close at 1732247266990Obtaining lock to block concurrent updates at 1732247266991 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732247266991Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732247266991Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732247266992 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732247266992Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732247267006 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732247267006Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732247267017 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732247267032 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732247267032Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732247267044 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732247267060 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732247267060Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732247267073 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732247267088 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732247267088Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6744afec: reopening flushed file at 1732247267100 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27dd3fde: reopening flushed file at 1732247267109 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ef1138b: reopening flushed file at 1732247267118 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6be72f74: reopening flushed file at 1732247267127 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=29, compaction requested=false at 1732247267137 (+10 ms)Writing region close event to WAL at 1732247267138 (+1 ms)Closed at 1732247267138 2024-11-22T03:47:47,140 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:47,140 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:47,140 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:47,141 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:47,141 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:47,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741830_1006 (size=10311) 2024-11-22T03:47:47,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741830_1006 (size=10311) 2024-11-22T03:47:47,144 INFO [M:0;c85114ed5096:41971 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-22T03:47:47,144 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T03:47:47,144 INFO [M:0;c85114ed5096:41971 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41971 2024-11-22T03:47:47,144 INFO [M:0;c85114ed5096:41971 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:47:47,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:47:47,246 INFO [M:0;c85114ed5096:41971 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:47:47,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41971-0x100658afe410000, quorum=127.0.0.1:49842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:47:47,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ff103fb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:47:47,250 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7012f44c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:47:47,250 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:47:47,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bfe00af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:47:47,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11e4312d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/hadoop.log.dir/,STOPPED} 2024-11-22T03:47:47,252 WARN [BP-203047728-172.17.0.2-1732247265153 heartbeating to localhost/127.0.0.1:37489 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:47:47,252 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:47:47,252 WARN [BP-203047728-172.17.0.2-1732247265153 heartbeating to localhost/127.0.0.1:37489 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-203047728-172.17.0.2-1732247265153 (Datanode Uuid 9e864513-21b1-4892-b6b1-c60b987b259d) service to localhost/127.0.0.1:37489 2024-11-22T03:47:47,252 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:47:47,252 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/cluster_3300ce87-5c35-b25a-98c3-4dc3d5c9cb4f/data/data3/current/BP-203047728-172.17.0.2-1732247265153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:47:47,252 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/cluster_3300ce87-5c35-b25a-98c3-4dc3d5c9cb4f/data/data4/current/BP-203047728-172.17.0.2-1732247265153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:47:47,253 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:47:47,254 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5686c999{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:47:47,255 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@66e738a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:47:47,255 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:47:47,255 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4504b856{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:47:47,255 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62ef2349{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/hadoop.log.dir/,STOPPED} 2024-11-22T03:47:47,256 WARN [BP-203047728-172.17.0.2-1732247265153 heartbeating to localhost/127.0.0.1:37489 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:47:47,256 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
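The Jetty ("Stopped o.e.j.w.WebAppContext@...{datanode,...}") and block-pool shutdown messages above and below come from the embedded HDFS test cluster backing the HBase minicluster; HBaseTestingUtil drives it internally, so the test never touches it directly. For reference, the equivalent standalone use of Hadoop's MiniDFSCluster, matching the two-datanode shape configured below, would look roughly like this (class name and printout are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Two datanodes, matching numDataNodes=2 in the StartMiniClusterOption below.
        MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        try {
          dfs.waitActive();  // wait until the namenode and both datanodes have registered
          System.out.println("HDFS test cluster at " + dfs.getFileSystem().getUri());
        } finally {
          dfs.shutdown();    // stops the datanodes and their Jetty web apps, as logged here
        }
      }
    }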
2024-11-22T03:47:47,256 WARN [BP-203047728-172.17.0.2-1732247265153 heartbeating to localhost/127.0.0.1:37489 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-203047728-172.17.0.2-1732247265153 (Datanode Uuid c063b1cc-0a3f-4f90-a91d-4351bebe0e49) service to localhost/127.0.0.1:37489 2024-11-22T03:47:47,256 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:47:47,257 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/cluster_3300ce87-5c35-b25a-98c3-4dc3d5c9cb4f/data/data1/current/BP-203047728-172.17.0.2-1732247265153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:47:47,257 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/cluster_3300ce87-5c35-b25a-98c3-4dc3d5c9cb4f/data/data2/current/BP-203047728-172.17.0.2-1732247265153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:47:47,257 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:47:47,262 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5f7f1f5d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:47:47,263 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58d6cebc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:47:47,263 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:47:47,263 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@255aa000{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:47:47,263 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6bfb1630{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/hadoop.log.dir/,STOPPED} 2024-11-22T03:47:47,269 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T03:47:47,284 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T03:47:47,285 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T03:47:47,285 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/hadoop.log.dir so I do NOT create it in target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a 2024-11-22T03:47:47,285 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3ac1ad78-f84f-b54d-2c4a-d2717caf703d/hadoop.tmp.dir so I do NOT create it in target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a 2024-11-22T03:47:47,285 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819, deleteOnExit=true 2024-11-22T03:47:47,285 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T03:47:47,285 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/test.cache.data in system properties and HBase conf 2024-11-22T03:47:47,285 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T03:47:47,285 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir in system properties and HBase conf 2024-11-22T03:47:47,285 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T03:47:47,285 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T03:47:47,285 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T03:47:47,286 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T03:47:47,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:47:47,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:47:47,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T03:47:47,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:47:47,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T03:47:47,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T03:47:47,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:47:47,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:47:47,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T03:47:47,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/nfs.dump.dir in system properties and HBase conf 2024-11-22T03:47:47,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/java.io.tmpdir in system properties and HBase conf 2024-11-22T03:47:47,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:47:47,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T03:47:47,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T03:47:47,299 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:47:47,350 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:47:47,356 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:47:47,357 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:47:47,357 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:47:47,357 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:47:47,358 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:47:47,359 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f4b9244{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:47:47,359 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bc4af61{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:47:47,453 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1283c476{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/java.io.tmpdir/jetty-localhost-33927-hadoop-hdfs-3_4_1-tests_jar-_-any-14371723200881202417/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:47:47,454 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@74c4bb5f{HTTP/1.1, (http/1.1)}{localhost:33927} 2024-11-22T03:47:47,454 INFO [Time-limited test {}] server.Server(415): Started @104781ms 2024-11-22T03:47:47,467 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:47:47,514 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:47:47,518 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:47:47,519 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:47:47,519 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:47:47,519 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:47:47,520 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f0429fe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:47:47,520 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c6cae60{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:47:47,613 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@41fecd82{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/java.io.tmpdir/jetty-localhost-40157-hadoop-hdfs-3_4_1-tests_jar-_-any-11679005730620181125/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:47:47,613 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@54e2df40{HTTP/1.1, (http/1.1)}{localhost:40157} 2024-11-22T03:47:47,614 INFO [Time-limited test {}] server.Server(415): Started @104940ms 2024-11-22T03:47:47,615 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:47:47,646 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:47:47,650 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:47:47,651 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:47:47,651 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:47:47,651 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:47:47,652 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32ba89de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:47:47,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d1555bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:47:47,677 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data2/current/BP-1396528010-172.17.0.2-1732247267310/current, will proceed with Du for space computation calculation, 2024-11-22T03:47:47,677 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data1/current/BP-1396528010-172.17.0.2-1732247267310/current, will proceed with Du for space computation calculation, 2024-11-22T03:47:47,698 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:47:47,701 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ba85a3212604154 with lease ID 0x8ff38ce0f6701b17: Processing first storage report for DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2 from datanode DatanodeRegistration(127.0.0.1:34635, datanodeUuid=8a296d4b-845c-407a-bc72-2ef38bd79c2d, infoPort=44871, infoSecurePort=0, ipcPort=34719, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310) 2024-11-22T03:47:47,701 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ba85a3212604154 with lease ID 0x8ff38ce0f6701b17: from storage DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2 node DatanodeRegistration(127.0.0.1:34635, datanodeUuid=8a296d4b-845c-407a-bc72-2ef38bd79c2d, infoPort=44871, infoSecurePort=0, ipcPort=34719, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:47:47,701 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ba85a3212604154 with lease ID 0x8ff38ce0f6701b17: Processing first storage report for DS-831c14a2-d526-4aee-8863-12ec089b1b7e from datanode DatanodeRegistration(127.0.0.1:34635, datanodeUuid=8a296d4b-845c-407a-bc72-2ef38bd79c2d, infoPort=44871, infoSecurePort=0, ipcPort=34719, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310) 2024-11-22T03:47:47,701 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ba85a3212604154 with lease ID 0x8ff38ce0f6701b17: from storage DS-831c14a2-d526-4aee-8863-12ec089b1b7e node DatanodeRegistration(127.0.0.1:34635, datanodeUuid=8a296d4b-845c-407a-bc72-2ef38bd79c2d, infoPort=44871, infoSecurePort=0, ipcPort=34719, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:47:47,757 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@202b01d5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/java.io.tmpdir/jetty-localhost-41195-hadoop-hdfs-3_4_1-tests_jar-_-any-14131916973439544208/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:47:47,757 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@100caf4c{HTTP/1.1, (http/1.1)}{localhost:41195} 2024-11-22T03:47:47,757 INFO [Time-limited test {}] server.Server(415): Started @105084ms 2024-11-22T03:47:47,758 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
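The entries above (from "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, ... numDataNodes=2, ... numZkServers=1}" through the DFS and datanode web servers coming up) correspond to the usual test-harness setup. As a reading aid only, here is a minimal JUnit-style sketch of that setup using the public testing classes named in the log (HBaseTestingUtil, StartMiniClusterOption); the exact builder calls are assumptions based on the 3.x testing API and are not part of this log.

```java
// Illustrative sketch only -- not part of the log. Assumes the hbase-testing-util
// classes named in the messages above and their 3.x builder API.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1,
    // numDataNodes=2, numZkServers=1} from the log above.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);   // brings up DFS, ZooKeeper and HBase, as logged above
    try {
      // test body would go here
    } finally {
      util.shutdownMiniCluster();    // produces the "Minicluster is down" message
    }
  }
}
```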
2024-11-22T03:47:47,820 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data4/current/BP-1396528010-172.17.0.2-1732247267310/current, will proceed with Du for space computation calculation, 2024-11-22T03:47:47,820 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data3/current/BP-1396528010-172.17.0.2-1732247267310/current, will proceed with Du for space computation calculation, 2024-11-22T03:47:47,841 WARN [Thread-671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:47:47,843 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe2b4bad845b2d223 with lease ID 0x8ff38ce0f6701b18: Processing first storage report for DS-5ddb5d59-a71a-4043-9647-298809519964 from datanode DatanodeRegistration(127.0.0.1:41395, datanodeUuid=4f278f4c-34ba-4fc6-8845-7d0a79f5af99, infoPort=38179, infoSecurePort=0, ipcPort=35435, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310) 2024-11-22T03:47:47,843 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe2b4bad845b2d223 with lease ID 0x8ff38ce0f6701b18: from storage DS-5ddb5d59-a71a-4043-9647-298809519964 node DatanodeRegistration(127.0.0.1:41395, datanodeUuid=4f278f4c-34ba-4fc6-8845-7d0a79f5af99, infoPort=38179, infoSecurePort=0, ipcPort=35435, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:47:47,844 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe2b4bad845b2d223 with lease ID 0x8ff38ce0f6701b18: Processing first storage report for DS-aefb9566-241f-4bc5-91f0-a57efdf3aa4d from datanode DatanodeRegistration(127.0.0.1:41395, datanodeUuid=4f278f4c-34ba-4fc6-8845-7d0a79f5af99, infoPort=38179, infoSecurePort=0, ipcPort=35435, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310) 2024-11-22T03:47:47,844 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe2b4bad845b2d223 with lease ID 0x8ff38ce0f6701b18: from storage DS-aefb9566-241f-4bc5-91f0-a57efdf3aa4d node DatanodeRegistration(127.0.0.1:41395, datanodeUuid=4f278f4c-34ba-4fc6-8845-7d0a79f5af99, infoPort=38179, infoSecurePort=0, ipcPort=35435, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:47:47,889 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a 2024-11-22T03:47:47,894 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/zookeeper_0, clientPort=57058, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T03:47:47,895 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57058 2024-11-22T03:47:47,895 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:47,898 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:47,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:47:47,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41395 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:47:47,911 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360 with version=8 2024-11-22T03:47:47,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/hbase-staging 2024-11-22T03:47:47,914 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c85114ed5096:0 server-side Connection retries=45 2024-11-22T03:47:47,914 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:47:47,914 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:47:47,914 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:47:47,914 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:47:47,914 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:47:47,914 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T03:47:47,915 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:47:47,915 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39239 2024-11-22T03:47:47,917 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39239 connecting to ZooKeeper ensemble=127.0.0.1:57058 2024-11-22T03:47:47,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:392390x0, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:47:47,922 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39239-0x100658b06a30000 connected 2024-11-22T03:47:47,934 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:47,936 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:47,939 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:47:47,939 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360, hbase.cluster.distributed=false 2024-11-22T03:47:47,940 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:47:47,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39239 2024-11-22T03:47:47,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39239 2024-11-22T03:47:47,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39239 2024-11-22T03:47:47,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39239 2024-11-22T03:47:47,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39239 2024-11-22T03:47:47,958 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c85114ed5096:0 server-side Connection retries=45 2024-11-22T03:47:47,958 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:47:47,958 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:47:47,958 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:47:47,958 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:47:47,958 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:47:47,958 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:47:47,958 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:47:47,959 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35197 2024-11-22T03:47:47,960 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35197 connecting to ZooKeeper ensemble=127.0.0.1:57058 2024-11-22T03:47:47,961 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:47,962 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:47,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:351970x0, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:47:47,966 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35197-0x100658b06a30001 connected 2024-11-22T03:47:47,966 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:47:47,967 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:47:47,967 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T03:47:47,968 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T03:47:47,969 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:47:47,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35197 2024-11-22T03:47:47,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35197 2024-11-22T03:47:47,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35197 2024-11-22T03:47:47,972 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35197 2024-11-22T03:47:47,972 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35197 
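The master and regionserver above both connect to the MiniZooKeeperCluster on clientPort=57058. For orientation, the sketch below shows how a client would reach this cluster through that ensemble using the standard public client API; in a real test the Configuration would normally come straight from the testing util rather than being set by hand.

```java
// Illustrative sketch only: connecting a client to the cluster whose ZooKeeper
// ensemble (127.0.0.1:57058) is logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // ensemble host from the log
    conf.setInt("hbase.zookeeper.property.clientPort", 57058); // clientPort from the log

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}
```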
2024-11-22T03:47:47,985 DEBUG [M:0;c85114ed5096:39239 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c85114ed5096:39239 2024-11-22T03:47:47,985 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c85114ed5096,39239,1732247267914 2024-11-22T03:47:47,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:47:47,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:47:47,987 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c85114ed5096,39239,1732247267914 2024-11-22T03:47:47,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T03:47:47,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:47,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:47,988 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:47:47,989 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c85114ed5096,39239,1732247267914 from backup master directory 2024-11-22T03:47:47,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c85114ed5096,39239,1732247267914 2024-11-22T03:47:47,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:47:47,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:47:47,990 WARN [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
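The entries above show the master registering itself under /hbase/backup-masters and then removing that registration as it becomes active. The sketch below illustrates only the underlying ephemeral-znode pattern with the plain ZooKeeper client; it is not HBase's internal ActiveMasterManager/ZKUtil code, and everything except the paths taken from the log is an assumption.

```java
// Illustrative sketch of the ephemeral-znode pattern behind the backup-masters
// registration logged above; uses the plain ZooKeeper client, not HBase internals.
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class BackupMasterZNodeSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57058", 30_000, event -> { });

    String znode = "/hbase/backup-masters/c85114ed5096,39239,1732247267914";
    // Register as a backup master: ephemeral, so the znode vanishes if the process dies.
    zk.create(znode, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // Once promoted to active master, drop the backup registration, matching
    // "Deleting ZNode for ... from backup master directory" above.
    zk.delete(znode, -1);
    zk.close();
  }
}
```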
2024-11-22T03:47:47,990 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c85114ed5096,39239,1732247267914 2024-11-22T03:47:47,995 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/hbase.id] with ID: 848e7b6c-b358-43e4-9220-2fbd73c20848 2024-11-22T03:47:47,995 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/.tmp/hbase.id 2024-11-22T03:47:48,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:47:48,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41395 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:47:48,002 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/.tmp/hbase.id]:[hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/hbase.id] 2024-11-22T03:47:48,016 INFO [master/c85114ed5096:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:48,016 INFO [master/c85114ed5096:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T03:47:48,018 INFO [master/c85114ed5096:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
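The FSUtils messages above record the cluster ID file being written to a .tmp location and then moved to hdfs://localhost:43749/.../hbase.id. As a reading aid, here is a hedged sketch of that write-then-rename publish pattern using the public Hadoop FileSystem API; the paths and ID echo the log, while FSUtils itself is HBase-internal and does not look exactly like this.

```java
// Illustrative sketch only: the write-to-temp-then-rename pattern the FSUtils
// messages above describe for hbase.id, expressed with the public FileSystem API.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdPublishSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:43749");       // namenode from the log
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360");
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");

    // Write the new content to a temporary file first...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("848e7b6c-b358-43e4-9220-2fbd73c20848".getBytes(StandardCharsets.UTF_8));
    }
    // ...then rename it into place so readers never see a half-written file.
    if (!fs.rename(tmp, target)) {
      throw new IOException("failed to move " + tmp + " to " + target);
    }
  }
}
```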
2024-11-22T03:47:48,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:48,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:48,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:47:48,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41395 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:47:48,027 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:47:48,028 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T03:47:48,029 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:47:48,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41395 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:47:48,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:47:48,038 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store 2024-11-22T03:47:48,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41395 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:47:48,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:47:48,046 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:47:48,046 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:47:48,046 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:47:48,046 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:47:48,046 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:47:48,046 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:47:48,046 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
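The master:store descriptor dumped above lists four families (info, proc, rs, state) with attributes such as ROWCOL bloom filters, ROW_INDEX_V1 encoding and an 8 KB block size on info. Purely to make those attributes easier to read, the sketch below expresses the info family with the public descriptor builder API; MasterRegion constructs this descriptor internally, so this is an illustrative translation, not the actual code path.

```java
// Illustrative sketch: the 'info' column family of the master:store schema logged
// above, expressed with the public descriptor builders. Reading aid only.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreSchemaSketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
        .build();

    TableDescriptor store = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();

    System.out.println(store);
  }
}
```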
2024-11-22T03:47:48,047 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732247268046Disabling compacts and flushes for region at 1732247268046Disabling writes for close at 1732247268046Writing region close event to WAL at 1732247268046Closed at 1732247268046 2024-11-22T03:47:48,048 WARN [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/.initializing 2024-11-22T03:47:48,048 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/WALs/c85114ed5096,39239,1732247267914 2024-11-22T03:47:48,051 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C39239%2C1732247267914, suffix=, logDir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/WALs/c85114ed5096,39239,1732247267914, archiveDir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/oldWALs, maxLogs=10 2024-11-22T03:47:48,052 INFO [master/c85114ed5096:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C39239%2C1732247267914.1732247268051 2024-11-22T03:47:48,058 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/WALs/c85114ed5096,39239,1732247267914/c85114ed5096%2C39239%2C1732247267914.1732247268051 2024-11-22T03:47:48,059 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44871:44871),(127.0.0.1/127.0.0.1:38179:38179)] 2024-11-22T03:47:48,061 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:47:48,061 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:47:48,061 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:48,061 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:48,063 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:48,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T03:47:48,064 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:48,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:48,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:48,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T03:47:48,066 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:48,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:47:48,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:48,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T03:47:48,068 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:48,069 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:47:48,069 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:48,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T03:47:48,070 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:48,071 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:47:48,071 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:48,072 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:48,072 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:48,074 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:48,074 DEBUG [master/c85114ed5096:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:48,075 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T03:47:48,076 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:47:48,078 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:47:48,079 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=690370, jitterRate=-0.12214919924736023}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T03:47:48,080 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732247268062Initializing all the Stores at 1732247268063 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247268063Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247268063Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247268063Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247268063Cleaning up temporary data from old regions at 1732247268074 (+11 ms)Region opened successfully at 1732247268080 (+6 ms) 2024-11-22T03:47:48,080 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T03:47:48,084 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@147c8f18, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c85114ed5096/172.17.0.2:0 2024-11-22T03:47:48,085 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T03:47:48,085 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T03:47:48,085 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T03:47:48,085 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T03:47:48,086 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T03:47:48,087 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T03:47:48,087 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T03:47:48,089 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T03:47:48,090 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T03:47:48,091 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T03:47:48,092 INFO [master/c85114ed5096:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T03:47:48,092 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T03:47:48,093 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T03:47:48,094 INFO [master/c85114ed5096:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T03:47:48,094 INFO [regionserver/c85114ed5096:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:47:48,095 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T03:47:48,095 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T03:47:48,096 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Unable to get data of 
znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T03:47:48,097 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T03:47:48,100 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T03:47:48,100 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T03:47:48,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:47:48,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:47:48,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:48,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:48,102 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c85114ed5096,39239,1732247267914, sessionid=0x100658b06a30000, setting cluster-up flag (Was=false) 2024-11-22T03:47:48,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:48,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:48,107 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T03:47:48,108 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c85114ed5096,39239,1732247267914 2024-11-22T03:47:48,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:48,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:48,113 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, 
/hbase/online-snapshot/abort 2024-11-22T03:47:48,114 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c85114ed5096,39239,1732247267914 2024-11-22T03:47:48,116 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T03:47:48,118 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T03:47:48,118 INFO [master/c85114ed5096:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T03:47:48,118 INFO [master/c85114ed5096:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T03:47:48,119 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c85114ed5096,39239,1732247267914 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T03:47:48,120 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:47:48,120 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:47:48,121 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:47:48,121 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:47:48,121 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c85114ed5096:0, corePoolSize=10, maxPoolSize=10 2024-11-22T03:47:48,121 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,121 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c85114ed5096:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:47:48,121 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] 
executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,122 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732247298122 2024-11-22T03:47:48,122 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T03:47:48,122 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T03:47:48,122 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T03:47:48,123 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T03:47:48,123 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T03:47:48,123 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T03:47:48,123 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,123 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T03:47:48,123 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T03:47:48,123 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T03:47:48,123 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:47:48,124 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T03:47:48,124 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T03:47:48,124 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T03:47:48,124 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247268124,5,FailOnTimeoutGroup] 2024-11-22T03:47:48,124 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247268124,5,FailOnTimeoutGroup] 2024-11-22T03:47:48,124 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-22T03:47:48,124 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T03:47:48,124 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,124 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,125 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:48,125 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T03:47:48,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41395 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:47:48,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:47:48,135 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T03:47:48,135 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360 2024-11-22T03:47:48,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:47:48,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41395 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:47:48,145 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:47:48,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:47:48,148 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:47:48,148 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:48,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:48,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:47:48,150 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:47:48,150 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:48,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:48,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:47:48,152 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:47:48,153 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:48,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:48,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:47:48,155 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:47:48,155 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:48,155 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:48,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:47:48,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740 2024-11-22T03:47:48,157 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740 2024-11-22T03:47:48,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:47:48,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:47:48,159 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T03:47:48,160 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:47:48,163 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:47:48,163 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812313, jitterRate=0.03291040658950806}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:47:48,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732247268145Initializing all the Stores at 1732247268146 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247268146Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247268147 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247268147Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247268147Cleaning up temporary data from old regions at 1732247268158 (+11 ms)Region opened successfully at 1732247268164 (+6 ms) 2024-11-22T03:47:48,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:47:48,165 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:47:48,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:47:48,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:47:48,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:47:48,165 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:47:48,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732247268164Disabling compacts and flushes for region at 1732247268164Disabling writes for close at 1732247268165 (+1 
ms)Writing region close event to WAL at 1732247268165Closed at 1732247268165 2024-11-22T03:47:48,167 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:47:48,167 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T03:47:48,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T03:47:48,169 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:47:48,170 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T03:47:48,174 INFO [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(746): ClusterId : 848e7b6c-b358-43e4-9220-2fbd73c20848 2024-11-22T03:47:48,174 DEBUG [RS:0;c85114ed5096:35197 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:47:48,176 DEBUG [RS:0;c85114ed5096:35197 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:47:48,176 DEBUG [RS:0;c85114ed5096:35197 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:47:48,177 DEBUG [RS:0;c85114ed5096:35197 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:47:48,178 DEBUG [RS:0;c85114ed5096:35197 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c73e1f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c85114ed5096/172.17.0.2:0 2024-11-22T03:47:48,189 DEBUG [RS:0;c85114ed5096:35197 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c85114ed5096:35197 2024-11-22T03:47:48,189 INFO [RS:0;c85114ed5096:35197 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:47:48,189 INFO [RS:0;c85114ed5096:35197 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:47:48,189 DEBUG [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T03:47:48,190 INFO [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(2659): reportForDuty to master=c85114ed5096,39239,1732247267914 with port=35197, startcode=1732247267957 2024-11-22T03:47:48,190 DEBUG [RS:0;c85114ed5096:35197 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:47:48,192 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47613, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:47:48,193 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39239 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c85114ed5096,35197,1732247267957 2024-11-22T03:47:48,193 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39239 {}] master.ServerManager(517): Registering regionserver=c85114ed5096,35197,1732247267957 2024-11-22T03:47:48,195 DEBUG [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360 2024-11-22T03:47:48,195 DEBUG [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43749 2024-11-22T03:47:48,195 DEBUG [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:47:48,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:47:48,197 DEBUG [RS:0;c85114ed5096:35197 {}] zookeeper.ZKUtil(111): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c85114ed5096,35197,1732247267957 2024-11-22T03:47:48,197 WARN [RS:0;c85114ed5096:35197 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:47:48,197 INFO [RS:0;c85114ed5096:35197 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:47:48,198 DEBUG [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957 2024-11-22T03:47:48,198 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c85114ed5096,35197,1732247267957] 2024-11-22T03:47:48,201 INFO [RS:0;c85114ed5096:35197 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:47:48,204 INFO [RS:0;c85114ed5096:35197 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:47:48,204 INFO [RS:0;c85114ed5096:35197 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:47:48,204 INFO [RS:0;c85114ed5096:35197 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T03:47:48,204 INFO [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:47:48,205 INFO [RS:0;c85114ed5096:35197 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:47:48,205 INFO [RS:0;c85114ed5096:35197 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,205 DEBUG [RS:0;c85114ed5096:35197 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,205 DEBUG [RS:0;c85114ed5096:35197 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,205 DEBUG [RS:0;c85114ed5096:35197 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,205 DEBUG [RS:0;c85114ed5096:35197 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,206 DEBUG [RS:0;c85114ed5096:35197 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,206 DEBUG [RS:0;c85114ed5096:35197 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c85114ed5096:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:47:48,206 DEBUG [RS:0;c85114ed5096:35197 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,206 DEBUG [RS:0;c85114ed5096:35197 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,206 DEBUG [RS:0;c85114ed5096:35197 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,206 DEBUG [RS:0;c85114ed5096:35197 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,206 DEBUG [RS:0;c85114ed5096:35197 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,206 DEBUG [RS:0;c85114ed5096:35197 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,206 DEBUG [RS:0;c85114ed5096:35197 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:47:48,206 DEBUG [RS:0;c85114ed5096:35197 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:47:48,209 INFO [RS:0;c85114ed5096:35197 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-22T03:47:48,209 INFO [RS:0;c85114ed5096:35197 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,209 INFO [RS:0;c85114ed5096:35197 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,209 INFO [RS:0;c85114ed5096:35197 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,210 INFO [RS:0;c85114ed5096:35197 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,210 INFO [RS:0;c85114ed5096:35197 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,35197,1732247267957-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:47:48,224 INFO [RS:0;c85114ed5096:35197 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:47:48,224 INFO [RS:0;c85114ed5096:35197 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,35197,1732247267957-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,224 INFO [RS:0;c85114ed5096:35197 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,225 INFO [RS:0;c85114ed5096:35197 {}] regionserver.Replication(171): c85114ed5096,35197,1732247267957 started 2024-11-22T03:47:48,240 INFO [RS:0;c85114ed5096:35197 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,240 INFO [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(1482): Serving as c85114ed5096,35197,1732247267957, RpcServer on c85114ed5096/172.17.0.2:35197, sessionid=0x100658b06a30001 2024-11-22T03:47:48,241 DEBUG [RS:0;c85114ed5096:35197 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:47:48,241 DEBUG [RS:0;c85114ed5096:35197 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c85114ed5096,35197,1732247267957 2024-11-22T03:47:48,241 DEBUG [RS:0;c85114ed5096:35197 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,35197,1732247267957' 2024-11-22T03:47:48,241 DEBUG [RS:0;c85114ed5096:35197 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:47:48,241 DEBUG [RS:0;c85114ed5096:35197 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:47:48,242 DEBUG [RS:0;c85114ed5096:35197 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:47:48,242 DEBUG [RS:0;c85114ed5096:35197 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:47:48,242 DEBUG [RS:0;c85114ed5096:35197 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c85114ed5096,35197,1732247267957 2024-11-22T03:47:48,242 DEBUG [RS:0;c85114ed5096:35197 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,35197,1732247267957' 2024-11-22T03:47:48,242 DEBUG [RS:0;c85114ed5096:35197 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:47:48,243 DEBUG 
[RS:0;c85114ed5096:35197 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:47:48,243 DEBUG [RS:0;c85114ed5096:35197 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:47:48,243 INFO [RS:0;c85114ed5096:35197 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:47:48,243 INFO [RS:0;c85114ed5096:35197 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T03:47:48,321 WARN [c85114ed5096:39239 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T03:47:48,346 INFO [RS:0;c85114ed5096:35197 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C35197%2C1732247267957, suffix=, logDir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957, archiveDir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/oldWALs, maxLogs=32 2024-11-22T03:47:48,347 INFO [RS:0;c85114ed5096:35197 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C35197%2C1732247267957.1732247268346 2024-11-22T03:47:48,353 INFO [RS:0;c85114ed5096:35197 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 2024-11-22T03:47:48,354 DEBUG [RS:0;c85114ed5096:35197 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38179:38179),(127.0.0.1/127.0.0.1:44871:44871)] 2024-11-22T03:47:48,571 DEBUG [c85114ed5096:39239 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T03:47:48,572 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c85114ed5096,35197,1732247267957 2024-11-22T03:47:48,573 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c85114ed5096,35197,1732247267957, state=OPENING 2024-11-22T03:47:48,575 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T03:47:48,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:48,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:47:48,577 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:47:48,578 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:47:48,578 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=c85114ed5096,35197,1732247267957}] 2024-11-22T03:47:48,578 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:47:48,732 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T03:47:48,736 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50703, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T03:47:48,740 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T03:47:48,741 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:47:48,743 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C35197%2C1732247267957.meta, suffix=.meta, logDir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957, archiveDir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/oldWALs, maxLogs=32 2024-11-22T03:47:48,744 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta 2024-11-22T03:47:48,749 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:47:48,750 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T03:47:48,750 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta 2024-11-22T03:47:48,751 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-22T03:47:48,753 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38179:38179),(127.0.0.1/127.0.0.1:44871:44871)] 2024-11-22T03:47:48,760 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:47:48,761 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T03:47:48,761 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered 
coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T03:47:48,761 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-22T03:47:48,761 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T03:47:48,761 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:47:48,761 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T03:47:48,761 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T03:47:48,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:47:48,765 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:47:48,765 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:48,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:48,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:47:48,770 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:47:48,771 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:48,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:48,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:47:48,773 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:47:48,773 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:48,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:48,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:47:48,776 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:47:48,776 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:48,777 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:47:48,777 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:47:48,778 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740 2024-11-22T03:47:48,780 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740 2024-11-22T03:47:48,781 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:47:48,781 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:47:48,782 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:47:48,783 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:47:48,784 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=845829, jitterRate=0.07552790641784668}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:47:48,784 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T03:47:48,785 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732247268761Writing region info on filesystem at 1732247268761Initializing all the Stores at 1732247268763 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247268763Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'} at 1732247268764 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247268764Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247268764Cleaning up temporary data from old regions at 1732247268781 (+17 ms)Running coprocessor post-open hooks at 1732247268784 (+3 ms)Region opened successfully at 1732247268785 (+1 ms) 2024-11-22T03:47:48,787 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732247268732 2024-11-22T03:47:48,813 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T03:47:48,813 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T03:47:48,814 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c85114ed5096,35197,1732247267957 2024-11-22T03:47:48,815 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c85114ed5096,35197,1732247267957, state=OPEN 2024-11-22T03:47:48,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:47:48,818 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c85114ed5096,35197,1732247267957 2024-11-22T03:47:48,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:47:48,818 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:47:48,818 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:47:48,829 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T03:47:48,830 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c85114ed5096,35197,1732247267957 in 240 msec 2024-11-22T03:47:48,834 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=2, resume processing ppid=1 2024-11-22T03:47:48,834 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 663 msec 2024-11-22T03:47:48,835 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:47:48,835 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T03:47:48,836 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:47:48,836 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c85114ed5096,35197,1732247267957, seqNum=-1] 2024-11-22T03:47:48,837 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:47:48,838 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49541, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:47:48,845 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 727 msec 2024-11-22T03:47:48,845 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732247268845, completionTime=-1 2024-11-22T03:47:48,845 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T03:47:48,845 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T03:47:48,847 INFO [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T03:47:48,847 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732247328847 2024-11-22T03:47:48,847 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732247388847 2024-11-22T03:47:48,847 INFO [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T03:47:48,848 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,39239,1732247267914-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,848 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,39239,1732247267914-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T03:47:48,848 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,39239,1732247267914-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,848 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c85114ed5096:39239, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,848 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,848 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,851 DEBUG [master/c85114ed5096:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T03:47:48,853 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.863sec 2024-11-22T03:47:48,853 INFO [master/c85114ed5096:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T03:47:48,854 INFO [master/c85114ed5096:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T03:47:48,854 INFO [master/c85114ed5096:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T03:47:48,854 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T03:47:48,854 INFO [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T03:47:48,854 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,39239,1732247267914-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:47:48,854 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,39239,1732247267914-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T03:47:48,857 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T03:47:48,857 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T03:47:48,857 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,39239,1732247267914-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T03:47:48,874 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2070bd8d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:47:48,874 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c85114ed5096,39239,-1 for getting cluster id 2024-11-22T03:47:48,874 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T03:47:48,876 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '848e7b6c-b358-43e4-9220-2fbd73c20848' 2024-11-22T03:47:48,877 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T03:47:48,877 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "848e7b6c-b358-43e4-9220-2fbd73c20848" 2024-11-22T03:47:48,877 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bfa870, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:47:48,877 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c85114ed5096,39239,-1] 2024-11-22T03:47:48,878 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T03:47:48,878 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:47:48,880 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44238, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T03:47:48,881 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7856d523, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:47:48,881 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:47:48,882 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c85114ed5096,35197,1732247267957, seqNum=-1] 2024-11-22T03:47:48,883 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:47:48,885 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35508, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:47:48,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c85114ed5096,39239,1732247267914 2024-11-22T03:47:48,888 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:48,892 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T03:47:48,914 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c85114ed5096:0 server-side Connection retries=45 2024-11-22T03:47:48,914 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:47:48,914 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:47:48,914 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:47:48,914 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:47:48,914 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:47:48,914 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:47:48,915 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:47:48,915 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43241 2024-11-22T03:47:48,918 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43241 connecting to ZooKeeper ensemble=127.0.0.1:57058 2024-11-22T03:47:48,919 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:48,921 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:47:48,927 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:432410x0, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:47:48,927 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:432410x0, quorum=127.0.0.1:57058, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-22T03:47:48,928 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43241-0x100658b06a30002 connected 2024-11-22T03:47:48,928 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-22T03:47:48,928 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:47:48,933 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T03:47:48,934 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:43241-0x100658b06a30002, quorum=127.0.0.1:57058, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:47:48,936 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43241-0x100658b06a30002, quorum=127.0.0.1:57058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:47:48,937 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43241 2024-11-22T03:47:48,939 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43241 2024-11-22T03:47:48,939 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43241 2024-11-22T03:47:48,940 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43241 2024-11-22T03:47:48,940 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43241 2024-11-22T03:47:48,942 INFO [RS:1;c85114ed5096:43241 {}] regionserver.HRegionServer(746): ClusterId : 848e7b6c-b358-43e4-9220-2fbd73c20848 2024-11-22T03:47:48,942 DEBUG [RS:1;c85114ed5096:43241 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:47:48,944 DEBUG [RS:1;c85114ed5096:43241 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:47:48,944 DEBUG [RS:1;c85114ed5096:43241 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:47:48,945 DEBUG [RS:1;c85114ed5096:43241 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:47:48,946 DEBUG [RS:1;c85114ed5096:43241 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d14e7f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c85114ed5096/172.17.0.2:0 2024-11-22T03:47:48,966 DEBUG [RS:1;c85114ed5096:43241 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;c85114ed5096:43241 2024-11-22T03:47:48,966 INFO [RS:1;c85114ed5096:43241 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:47:48,966 INFO [RS:1;c85114ed5096:43241 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:47:48,966 DEBUG [RS:1;c85114ed5096:43241 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T03:47:48,967 INFO [RS:1;c85114ed5096:43241 {}] regionserver.HRegionServer(2659): reportForDuty to master=c85114ed5096,39239,1732247267914 with port=43241, startcode=1732247268913 2024-11-22T03:47:48,967 DEBUG [RS:1;c85114ed5096:43241 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:47:48,969 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49011, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:47:48,969 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39239 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c85114ed5096,43241,1732247268913 2024-11-22T03:47:48,970 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39239 {}] master.ServerManager(517): Registering regionserver=c85114ed5096,43241,1732247268913 2024-11-22T03:47:48,971 DEBUG [RS:1;c85114ed5096:43241 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360 2024-11-22T03:47:48,972 DEBUG [RS:1;c85114ed5096:43241 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43749 2024-11-22T03:47:48,972 DEBUG [RS:1;c85114ed5096:43241 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:47:48,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:47:48,974 DEBUG [RS:1;c85114ed5096:43241 {}] zookeeper.ZKUtil(111): regionserver:43241-0x100658b06a30002, quorum=127.0.0.1:57058, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c85114ed5096,43241,1732247268913 2024-11-22T03:47:48,974 WARN [RS:1;c85114ed5096:43241 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:47:48,974 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c85114ed5096,43241,1732247268913] 2024-11-22T03:47:48,974 INFO [RS:1;c85114ed5096:43241 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:47:48,974 DEBUG [RS:1;c85114ed5096:43241 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913 2024-11-22T03:47:48,978 INFO [RS:1;c85114ed5096:43241 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:47:48,981 INFO [RS:1;c85114ed5096:43241 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:47:48,981 INFO [RS:1;c85114ed5096:43241 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:47:48,982 INFO [RS:1;c85114ed5096:43241 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T03:47:48,986 INFO [RS:1;c85114ed5096:43241 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:47:48,987 INFO [RS:1;c85114ed5096:43241 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:47:48,987 INFO [RS:1;c85114ed5096:43241 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,987 DEBUG [RS:1;c85114ed5096:43241 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,988 DEBUG [RS:1;c85114ed5096:43241 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,988 DEBUG [RS:1;c85114ed5096:43241 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,988 DEBUG [RS:1;c85114ed5096:43241 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,988 DEBUG [RS:1;c85114ed5096:43241 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,988 DEBUG [RS:1;c85114ed5096:43241 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c85114ed5096:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:47:48,988 DEBUG [RS:1;c85114ed5096:43241 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,988 DEBUG [RS:1;c85114ed5096:43241 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,988 DEBUG [RS:1;c85114ed5096:43241 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,988 DEBUG [RS:1;c85114ed5096:43241 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,988 DEBUG [RS:1;c85114ed5096:43241 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,988 DEBUG [RS:1;c85114ed5096:43241 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:47:48,988 DEBUG [RS:1;c85114ed5096:43241 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:47:48,988 DEBUG [RS:1;c85114ed5096:43241 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:47:48,989 INFO [RS:1;c85114ed5096:43241 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-22T03:47:48,989 INFO [RS:1;c85114ed5096:43241 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,989 INFO [RS:1;c85114ed5096:43241 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,989 INFO [RS:1;c85114ed5096:43241 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,989 INFO [RS:1;c85114ed5096:43241 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:48,989 INFO [RS:1;c85114ed5096:43241 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,43241,1732247268913-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:47:49,020 INFO [RS:1;c85114ed5096:43241 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:47:49,020 INFO [RS:1;c85114ed5096:43241 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,43241,1732247268913-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:49,020 INFO [RS:1;c85114ed5096:43241 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:49,020 INFO [RS:1;c85114ed5096:43241 {}] regionserver.Replication(171): c85114ed5096,43241,1732247268913 started 2024-11-22T03:47:49,034 INFO [RS:1;c85114ed5096:43241 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:47:49,034 INFO [RS:1;c85114ed5096:43241 {}] regionserver.HRegionServer(1482): Serving as c85114ed5096,43241,1732247268913, RpcServer on c85114ed5096/172.17.0.2:43241, sessionid=0x100658b06a30002 2024-11-22T03:47:49,034 DEBUG [RS:1;c85114ed5096:43241 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:47:49,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;c85114ed5096:43241,5,FailOnTimeoutGroup] 2024-11-22T03:47:49,034 DEBUG [RS:1;c85114ed5096:43241 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c85114ed5096,43241,1732247268913 2024-11-22T03:47:49,034 DEBUG [RS:1;c85114ed5096:43241 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,43241,1732247268913' 2024-11-22T03:47:49,035 DEBUG [RS:1;c85114ed5096:43241 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:47:49,035 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-22T03:47:49,035 DEBUG [RS:1;c85114ed5096:43241 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:47:49,035 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T03:47:49,036 DEBUG [RS:1;c85114ed5096:43241 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:47:49,036 DEBUG [RS:1;c85114ed5096:43241 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:47:49,036 DEBUG [RS:1;c85114ed5096:43241 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
c85114ed5096,43241,1732247268913 2024-11-22T03:47:49,036 DEBUG [RS:1;c85114ed5096:43241 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,43241,1732247268913' 2024-11-22T03:47:49,036 DEBUG [RS:1;c85114ed5096:43241 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:47:49,036 DEBUG [RS:1;c85114ed5096:43241 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:47:49,037 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is c85114ed5096,39239,1732247267914 2024-11-22T03:47:49,037 DEBUG [RS:1;c85114ed5096:43241 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:47:49,037 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@65cf3c19 2024-11-22T03:47:49,037 INFO [RS:1;c85114ed5096:43241 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:47:49,037 INFO [RS:1;c85114ed5096:43241 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T03:47:49,037 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T03:47:49,038 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44242, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T03:47:49,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39239 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T03:47:49,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39239 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-22T03:47:49,039 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39239 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:47:49,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39239 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T03:47:49,042 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T03:47:49,042 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:49,042 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39239 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-22T03:47:49,044 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T03:47:49,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39239 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:47:49,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741835_1011 (size=393) 2024-11-22T03:47:49,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41395 is added to blk_1073741835_1011 (size=393) 2024-11-22T03:47:49,052 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6fd9fc11d70fd9a0496bcc8ab732b0af, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360 2024-11-22T03:47:49,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41395 is added to blk_1073741836_1012 (size=76) 2024-11-22T03:47:49,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741836_1012 (size=76) 2024-11-22T03:47:49,139 INFO 
[RS:1;c85114ed5096:43241 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C43241%2C1732247268913, suffix=, logDir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913, archiveDir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/oldWALs, maxLogs=32 2024-11-22T03:47:49,141 INFO [RS:1;c85114ed5096:43241 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C43241%2C1732247268913.1732247269140 2024-11-22T03:47:49,152 INFO [RS:1;c85114ed5096:43241 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 2024-11-22T03:47:49,152 DEBUG [RS:1;c85114ed5096:43241 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38179:38179),(127.0.0.1/127.0.0.1:44871:44871)] 2024-11-22T03:47:49,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:47:49,179 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:47:49,460 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:47:49,460 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 6fd9fc11d70fd9a0496bcc8ab732b0af, disabling compactions & flushes 2024-11-22T03:47:49,460 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 2024-11-22T03:47:49,460 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 2024-11-22T03:47:49,460 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. after waiting 0 ms 2024-11-22T03:47:49,460 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 2024-11-22T03:47:49,460 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 
2024-11-22T03:47:49,460 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6fd9fc11d70fd9a0496bcc8ab732b0af: Waiting for close lock at 1732247269460Disabling compacts and flushes for region at 1732247269460Disabling writes for close at 1732247269460Writing region close event to WAL at 1732247269460Closed at 1732247269460 2024-11-22T03:47:49,462 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T03:47:49,462 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732247269462"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732247269462"}]},"ts":"1732247269462"} 2024-11-22T03:47:49,465 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-22T03:47:49,466 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T03:47:49,466 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732247269466"}]},"ts":"1732247269466"} 2024-11-22T03:47:49,468 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-22T03:47:49,471 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {c85114ed5096=0} racks are {/default-rack=0} 2024-11-22T03:47:49,475 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-22T03:47:49,475 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-22T03:47:49,475 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-22T03:47:49,475 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-22T03:47:49,475 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-22T03:47:49,475 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-22T03:47:49,475 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-22T03:47:49,476 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6fd9fc11d70fd9a0496bcc8ab732b0af, ASSIGN}] 2024-11-22T03:47:49,478 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6fd9fc11d70fd9a0496bcc8ab732b0af, ASSIGN 2024-11-22T03:47:49,479 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6fd9fc11d70fd9a0496bcc8ab732b0af, ASSIGN; state=OFFLINE, location=c85114ed5096,35197,1732247267957; forceNewPlan=false, retain=false 2024-11-22T03:47:49,630 INFO [c85114ed5096:39239 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-22T03:47:49,630 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6fd9fc11d70fd9a0496bcc8ab732b0af, regionState=OPENING, regionLocation=c85114ed5096,35197,1732247267957 2024-11-22T03:47:49,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6fd9fc11d70fd9a0496bcc8ab732b0af, ASSIGN because future has completed 2024-11-22T03:47:49,634 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6fd9fc11d70fd9a0496bcc8ab732b0af, server=c85114ed5096,35197,1732247267957}] 2024-11-22T03:47:49,700 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:47:49,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:47:49,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:47:49,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:47:49,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:47:49,791 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 
2024-11-22T03:47:49,791 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6fd9fc11d70fd9a0496bcc8ab732b0af, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:47:49,792 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:47:49,792 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:47:49,792 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:47:49,792 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:47:49,793 INFO [StoreOpener-6fd9fc11d70fd9a0496bcc8ab732b0af-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:47:49,794 INFO [StoreOpener-6fd9fc11d70fd9a0496bcc8ab732b0af-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6fd9fc11d70fd9a0496bcc8ab732b0af columnFamilyName info 2024-11-22T03:47:49,794 DEBUG [StoreOpener-6fd9fc11d70fd9a0496bcc8ab732b0af-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:47:49,795 INFO [StoreOpener-6fd9fc11d70fd9a0496bcc8ab732b0af-1 {}] regionserver.HStore(327): Store=6fd9fc11d70fd9a0496bcc8ab732b0af/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:47:49,795 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:47:49,796 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:47:49,796 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:47:49,797 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:47:49,797 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:47:49,798 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:47:49,801 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:47:49,801 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6fd9fc11d70fd9a0496bcc8ab732b0af; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=720550, jitterRate=-0.08377380669116974}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T03:47:49,801 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:47:49,802 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6fd9fc11d70fd9a0496bcc8ab732b0af: Running coprocessor pre-open hook at 1732247269792Writing region info on filesystem at 1732247269792Initializing all the Stores at 1732247269793 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247269793Cleaning up temporary data from old regions at 1732247269797 (+4 ms)Running coprocessor post-open hooks at 1732247269801 (+4 ms)Region opened successfully at 1732247269802 (+1 ms) 2024-11-22T03:47:49,803 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af., pid=6, masterSystemTime=1732247269787 2024-11-22T03:47:49,806 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 2024-11-22T03:47:49,806 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 2024-11-22T03:47:49,807 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6fd9fc11d70fd9a0496bcc8ab732b0af, regionState=OPEN, openSeqNum=2, regionLocation=c85114ed5096,35197,1732247267957 2024-11-22T03:47:49,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6fd9fc11d70fd9a0496bcc8ab732b0af, server=c85114ed5096,35197,1732247267957 because future has completed 2024-11-22T03:47:49,816 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T03:47:49,817 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6fd9fc11d70fd9a0496bcc8ab732b0af, server=c85114ed5096,35197,1732247267957 in 178 msec 2024-11-22T03:47:49,821 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T03:47:49,821 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6fd9fc11d70fd9a0496bcc8ab732b0af, ASSIGN in 341 msec 2024-11-22T03:47:49,822 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T03:47:49,823 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732247269823"}]},"ts":"1732247269823"} 2024-11-22T03:47:49,826 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-22T03:47:49,828 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T03:47:49,831 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 789 msec 2024-11-22T03:47:54,202 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-22T03:47:55,296 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:47:55,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:47:55,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:47:55,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:47:55,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:47:58,749 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T03:47:58,749 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T03:47:58,750 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T03:47:58,750 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-22T03:47:58,750 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:47:58,751 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T03:47:59,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39239 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:47:59,127 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-22T03:47:59,127 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-22T03:47:59,132 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T03:47:59,132 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 2024-11-22T03:47:59,150 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:47:59,153 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:47:59,154 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:47:59,154 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:47:59,154 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:47:59,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f3178e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:47:59,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e536a4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:47:59,250 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1fdaf9dc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/java.io.tmpdir/jetty-localhost-46555-hadoop-hdfs-3_4_1-tests_jar-_-any-2655524180853374990/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:47:59,250 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a4a97e4{HTTP/1.1, (http/1.1)}{localhost:46555} 2024-11-22T03:47:59,250 INFO [Time-limited test {}] server.Server(415): Started @116577ms 2024-11-22T03:47:59,251 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:47:59,287 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:47:59,290 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:47:59,291 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:47:59,291 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:47:59,291 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:47:59,292 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ab3e732{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:47:59,292 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6796b4ca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:47:59,310 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data5/current/BP-1396528010-172.17.0.2-1732247267310/current, will proceed with Du for space computation calculation, 2024-11-22T03:47:59,310 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data6/current/BP-1396528010-172.17.0.2-1732247267310/current, will proceed with Du for space computation calculation, 2024-11-22T03:47:59,326 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:47:59,329 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd461c87d014abafb with lease ID 0x8ff38ce0f6701b19: Processing first storage report for DS-66fc4285-b115-478e-ae49-651241a2a320 from datanode DatanodeRegistration(127.0.0.1:42385, datanodeUuid=f8752521-28ed-48c6-b362-b2f1a86d4921, infoPort=40197, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310) 2024-11-22T03:47:59,329 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd461c87d014abafb with lease ID 0x8ff38ce0f6701b19: from storage DS-66fc4285-b115-478e-ae49-651241a2a320 node DatanodeRegistration(127.0.0.1:42385, datanodeUuid=f8752521-28ed-48c6-b362-b2f1a86d4921, infoPort=40197, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:47:59,329 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd461c87d014abafb with lease ID 0x8ff38ce0f6701b19: Processing first storage report for DS-1a681bac-276b-440a-9ab9-292e246ed2ab from datanode DatanodeRegistration(127.0.0.1:42385, datanodeUuid=f8752521-28ed-48c6-b362-b2f1a86d4921, infoPort=40197, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310) 2024-11-22T03:47:59,329 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd461c87d014abafb with lease ID 0x8ff38ce0f6701b19: from storage DS-1a681bac-276b-440a-9ab9-292e246ed2ab node DatanodeRegistration(127.0.0.1:42385, datanodeUuid=f8752521-28ed-48c6-b362-b2f1a86d4921, infoPort=40197, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T03:47:59,387 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@23995e63{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/java.io.tmpdir/jetty-localhost-35805-hadoop-hdfs-3_4_1-tests_jar-_-any-10560405331391978480/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:47:59,387 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7860ae0{HTTP/1.1, (http/1.1)}{localhost:35805} 2024-11-22T03:47:59,387 INFO [Time-limited test {}] server.Server(415): Started @116714ms 2024-11-22T03:47:59,388 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:47:59,424 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:47:59,427 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:47:59,428 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:47:59,428 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:47:59,428 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:47:59,429 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77a26ee8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:47:59,429 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e9a1cef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:47:59,451 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data8/current/BP-1396528010-172.17.0.2-1732247267310/current, will proceed with Du for space computation calculation, 2024-11-22T03:47:59,451 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data7/current/BP-1396528010-172.17.0.2-1732247267310/current, will proceed with Du for space computation calculation, 2024-11-22T03:47:59,470 WARN [Thread-843 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:47:59,473 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x10f64cf80b0e14f5 with lease ID 0x8ff38ce0f6701b1a: Processing first storage report for DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b from datanode DatanodeRegistration(127.0.0.1:43531, datanodeUuid=4cf6bb88-a7d6-444d-b436-b07a5f9b11ad, infoPort=37117, infoSecurePort=0, ipcPort=36423, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310) 2024-11-22T03:47:59,473 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x10f64cf80b0e14f5 with lease ID 0x8ff38ce0f6701b1a: from storage DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b node DatanodeRegistration(127.0.0.1:43531, datanodeUuid=4cf6bb88-a7d6-444d-b436-b07a5f9b11ad, infoPort=37117, infoSecurePort=0, ipcPort=36423, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:47:59,473 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x10f64cf80b0e14f5 with lease ID 0x8ff38ce0f6701b1a: Processing first storage report for DS-aa1a51b8-2e2f-4a72-8fd0-3cc0f713fa66 from datanode DatanodeRegistration(127.0.0.1:43531, datanodeUuid=4cf6bb88-a7d6-444d-b436-b07a5f9b11ad, infoPort=37117, infoSecurePort=0, ipcPort=36423, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310) 2024-11-22T03:47:59,473 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x10f64cf80b0e14f5 with lease ID 0x8ff38ce0f6701b1a: from storage DS-aa1a51b8-2e2f-4a72-8fd0-3cc0f713fa66 node DatanodeRegistration(127.0.0.1:43531, datanodeUuid=4cf6bb88-a7d6-444d-b436-b07a5f9b11ad, infoPort=37117, infoSecurePort=0, ipcPort=36423, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T03:47:59,528 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6bc02cff{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/java.io.tmpdir/jetty-localhost-33043-hadoop-hdfs-3_4_1-tests_jar-_-any-18132862002953396292/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:47:59,528 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13c2a569{HTTP/1.1, (http/1.1)}{localhost:33043} 2024-11-22T03:47:59,528 INFO [Time-limited test {}] server.Server(415): Started @116855ms 2024-11-22T03:47:59,529 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
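The block reports above show the additional datanodes (data directories data5 through data8 so far) registering with the NameNode while the original pipeline members are still up. A quick way to confirm which datanodes a test cluster currently exposes is to ask the NameNode for its live-node report. The sketch below is illustrative only and assumes the NameNode address hdfs://localhost:43749 seen in this run is still reachable; it is not code from the test.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class ListLiveDatanodes {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode address taken from this log; substitute your own cluster's URI.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43749"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      for (DatanodeInfo dn : dfs.getDataNodeStats()) {
        // Prints transfer address and UUID, matching the DatanodeRegistration entries above.
        System.out.println(dn.getXferAddr() + " uuid=" + dn.getDatanodeUuid());
      }
    }
  }
}
```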
2024-11-22T03:47:59,587 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data9/current/BP-1396528010-172.17.0.2-1732247267310/current, will proceed with Du for space computation calculation, 2024-11-22T03:47:59,587 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data10/current/BP-1396528010-172.17.0.2-1732247267310/current, will proceed with Du for space computation calculation, 2024-11-22T03:47:59,608 WARN [Thread-878 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:47:59,611 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf7761b11cdc9ab2f with lease ID 0x8ff38ce0f6701b1b: Processing first storage report for DS-67fd5dc9-f1a6-476b-b965-c614ada01e60 from datanode DatanodeRegistration(127.0.0.1:34921, datanodeUuid=a3474c82-49cd-4f65-a18b-576ee48dd28c, infoPort=40945, infoSecurePort=0, ipcPort=39097, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310) 2024-11-22T03:47:59,611 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf7761b11cdc9ab2f with lease ID 0x8ff38ce0f6701b1b: from storage DS-67fd5dc9-f1a6-476b-b965-c614ada01e60 node DatanodeRegistration(127.0.0.1:34921, datanodeUuid=a3474c82-49cd-4f65-a18b-576ee48dd28c, infoPort=40945, infoSecurePort=0, ipcPort=39097, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:47:59,611 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf7761b11cdc9ab2f with lease ID 0x8ff38ce0f6701b1b: Processing first storage report for DS-7e500a5f-9d3f-4358-989a-19f4f2fe57b4 from datanode DatanodeRegistration(127.0.0.1:34921, datanodeUuid=a3474c82-49cd-4f65-a18b-576ee48dd28c, infoPort=40945, infoSecurePort=0, ipcPort=39097, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310) 2024-11-22T03:47:59,611 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf7761b11cdc9ab2f with lease ID 0x8ff38ce0f6701b1b: from storage DS-7e500a5f-9d3f-4358-989a-19f4f2fe57b4 node DatanodeRegistration(127.0.0.1:34921, datanodeUuid=a3474c82-49cd-4f65-a18b-576ee48dd28c, infoPort=40945, infoSecurePort=0, ipcPort=39097, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:47:59,648 WARN [ResponseProcessor for block BP-1396528010-172.17.0.2-1732247267310:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1396528010-172.17.0.2-1732247267310:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:47:59,648 WARN [ResponseProcessor for block BP-1396528010-172.17.0.2-1732247267310:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1396528010-172.17.0.2-1732247267310:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:47:59,649 WARN [DataStreamer for file /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 block BP-1396528010-172.17.0.2-1732247267310:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK], DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]) is bad. 2024-11-22T03:47:59,649 WARN [DataStreamer for file /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta block BP-1396528010-172.17.0.2-1732247267310:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK], DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]) is bad. 2024-11-22T03:47:59,648 WARN [ResponseProcessor for block BP-1396528010-172.17.0.2-1732247267310:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1396528010-172.17.0.2-1732247267310:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:47:59,648 WARN [ResponseProcessor for block BP-1396528010-172.17.0.2-1732247267310:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1396528010-172.17.0.2-1732247267310:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1396528010-172.17.0.2-1732247267310:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
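The ResponseProcessor EOFs and the "datanode 0(...) is bad" recovery messages above are the HDFS client reacting to the killed datanode: it marks the failed pipeline member bad and tries to continue on the survivors. How aggressively the client replaces a bad datanode is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings. The snippet below is only a sketch of how a test or client might tune them; the values shown are illustrative, not necessarily the ones this test uses.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class PipelineRecoveryConf {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Ask the client to find a replacement datanode when a pipeline member fails...
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // ...but keep writing on the remaining replicas if no replacement can be found,
    // instead of failing the stream outright.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}
```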
2024-11-22T03:47:59,649 WARN [DataStreamer for file /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 block BP-1396528010-172.17.0.2-1732247267310:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK], DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]) is bad. 2024-11-22T03:47:59,649 WARN [DataStreamer for file /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/WALs/c85114ed5096,39239,1732247267914/c85114ed5096%2C39239%2C1732247267914.1732247268051 block BP-1396528010-172.17.0.2-1732247267310:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK], DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]) is bad. 2024-11-22T03:47:59,650 WARN [PacketResponder: BP-1396528010-172.17.0.2-1732247267310:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41395] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:47:59,651 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:36066 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36066 dst: /127.0.0.1:34635 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:47:59,650 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:43276 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41395:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43276 dst: /127.0.0.1:41395 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:47:59,651 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1440811221_22 at /127.0.0.1:35258 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:41395:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35258 dst: /127.0.0.1:41395 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:47:59,651 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1440811221_22 at /127.0.0.1:51262 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:34635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51262 dst: /127.0.0.1:34635 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:47:59,653 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:36050 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36050 dst: /127.0.0.1:34635 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
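The DataXceiver "Premature EOF" and ClosedChannelException traces above are the datanode-side counterparts of the same broken write pipelines. On the client side, a dead pipeline typically surfaces as an IOException out of hflush()/hsync() on the output stream, which is what drives the WAL error handling later in this log. The stand-alone probe below is a hedged illustration of that failure mode, not code from the test; the path /tmp/hflush-probe is made up.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HflushProbe {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path probe = new Path("/tmp/hflush-probe"); // hypothetical path, not from this test
    try (FileSystem fs = FileSystem.get(conf);
         FSDataOutputStream out = fs.create(probe, true)) {
      out.writeBytes("probe\n");
      try {
        out.hflush(); // pushes the packet through the datanode pipeline
      } catch (IOException e) {
        // With the pipeline gone, this is where the writer observes errors such as
        // "All datanodes [...] are bad. Aborting...".
        System.err.println("pipeline write failed: " + e.getMessage());
        throw e;
      }
    }
  }
}
```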
2024-11-22T03:47:59,653 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:43272 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41395:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43272 dst: /127.0.0.1:41395 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:47:59,652 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1353209707_22 at /127.0.0.1:36028 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36028 dst: /127.0.0.1:34635 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:47:59,654 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@202b01d5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:47:59,653 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1353209707_22 at /127.0.0.1:43256 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41395:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43256 dst: /127.0.0.1:41395 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
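Once its WAL pipeline is unrecoverable, the regionserver's log roller closes the old writer and opens a new WAL on healthy datanodes; the "roll requested" and "Rolled WAL" entries further below record exactly that. For completeness, a roll can also be requested explicitly through the public Admin API. The sketch below is illustrative only; the server name is copied from this log and would differ in any other cluster.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualWalRoll {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Region server identity as it appears in this log (host,port,startcode).
    ServerName rs = ServerName.valueOf("c85114ed5096,35197,1732247267957");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.rollWALWriter(rs); // asks the regionserver to close its current WAL and start a new one
    }
  }
}
```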
2024-11-22T03:47:59,654 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@100caf4c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:47:59,654 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:47:59,655 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d1555bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:47:59,655 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32ba89de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir/,STOPPED} 2024-11-22T03:47:59,656 WARN [BP-1396528010-172.17.0.2-1732247267310 heartbeating to localhost/127.0.0.1:43749 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:47:59,656 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T03:47:59,656 WARN [BP-1396528010-172.17.0.2-1732247267310 heartbeating to localhost/127.0.0.1:43749 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1396528010-172.17.0.2-1732247267310 (Datanode Uuid 4f278f4c-34ba-4fc6-8845-7d0a79f5af99) service to localhost/127.0.0.1:43749 2024-11-22T03:47:59,656 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:47:59,657 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data3/current/BP-1396528010-172.17.0.2-1732247267310 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:47:59,657 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data4/current/BP-1396528010-172.17.0.2-1732247267310 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:47:59,657 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:47:59,658 WARN [DataStreamer for file /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta block BP-1396528010-172.17.0.2-1732247267310:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:47:59,658 WARN [DataStreamer for file /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 block BP-1396528010-172.17.0.2-1732247267310:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:47:59,658 WARN [DataStreamer for file /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 block BP-1396528010-172.17.0.2-1732247267310:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:47:59,659 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@109fb582 {}] datanode.DataXceiver(331): 127.0.0.1:34635:DataXceiver error processing unknown operation src: /127.0.0.1:42462 dst: /127.0.0.1:34635 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:47:59,660 WARN [DataStreamer for file /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/WALs/c85114ed5096,39239,1732247267914/c85114ed5096%2C39239%2C1732247267914.1732247268051 block BP-1396528010-172.17.0.2-1732247267310:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
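Once the roll completes (recorded further below), the previous WAL file is still open under a lease held by the broken writer, so it cannot be read or archived until that lease is released; the RecoverLeaseFSUtils entries below show HBase doing this, retrying until the NameNode reports the file closed. A rough equivalent against the public HDFS API looks like the sketch below; the file path and NameNode address are copied from this log, and the 1-second retry interval is arbitrary.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecovery {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path oldWal = new Path("/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/"
        + "c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346");
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43749"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // recoverLease returns true once the file is closed; the NameNode may need
      // several attempts while block recovery is still in progress, as the entries below show.
      boolean closed = dfs.recoverLease(oldWal);
      while (!closed) {
        Thread.sleep(1000L);
        closed = dfs.recoverLease(oldWal);
      }
      System.out.println("lease recovered, old WAL closed: " + oldWal);
    }
  }
}
```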
2024-11-22T03:47:59,661 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@41fecd82{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:47:59,662 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54e2df40{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:47:59,662 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:47:59,662 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c6cae60{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:47:59,662 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f0429fe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir/,STOPPED} 2024-11-22T03:47:59,663 WARN [BP-1396528010-172.17.0.2-1732247267310 heartbeating to localhost/127.0.0.1:43749 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:47:59,663 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T03:47:59,663 WARN [BP-1396528010-172.17.0.2-1732247267310 heartbeating to localhost/127.0.0.1:43749 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1396528010-172.17.0.2-1732247267310 (Datanode Uuid 8a296d4b-845c-407a-bc72-2ef38bd79c2d) service to localhost/127.0.0.1:43749 2024-11-22T03:47:59,663 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:47:59,664 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data1/current/BP-1396528010-172.17.0.2-1732247267310 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:47:59,664 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data2/current/BP-1396528010-172.17.0.2-1732247267310 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:47:59,664 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:47:59,668 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af., hostname=c85114ed5096,35197,1732247267957, seqNum=2] 2024-11-22T03:47:59,669 ERROR [FSHLog-0-hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360-prefix:c85114ed5096,35197,1732247267957 {}] wal.AbstractFSWAL(1838): appendAndSync throws 
IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:47:59,669 WARN [FSHLog-0-hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360-prefix:c85114ed5096,35197,1732247267957 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:47:59,670 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:47:59,670 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c85114ed5096%2C35197%2C1732247267957:(num 1732247268346) roll requested 2024-11-22T03:47:59,670 INFO [regionserver/c85114ed5096:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C35197%2C1732247267957.1732247279670 2024-11-22T03:47:59,673 WARN [Thread-900 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:47:59,673 WARN [Thread-900 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]) is bad. 2024-11-22T03:47:59,673 WARN [Thread-900 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741838_1018 2024-11-22T03:47:59,675 WARN [Thread-900 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK] 2024-11-22T03:47:59,681 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:59,681 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:59,681 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:59,681 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:59,681 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:47:59,681 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247279670 2024-11-22T03:47:59,682 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:47:59,682 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:47:59,683 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-22T03:47:59,683 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-22T03:47:59,683 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 2024-11-22T03:47:59,684 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40945:40945),(127.0.0.1/127.0.0.1:37117:37117)] 2024-11-22T03:47:59,684 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 is not closed yet, will try archiving it next time 2024-11-22T03:47:59,686 WARN [IPC Server handler 2 on default port 43749 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-11-22T03:47:59,689 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 after 5ms 2024-11-22T03:48:00,030 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:00,990 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:01,684 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:01,686 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247279670 2024-11-22T03:48:01,687 WARN [ResponseProcessor for block BP-1396528010-172.17.0.2-1732247267310:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1396528010-172.17.0.2-1732247267310:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:01,688 WARN [DataStreamer for file /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247279670 block BP-1396528010-172.17.0.2-1732247267310:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK], DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]) is bad. 2024-11-22T03:48:01,689 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:55238 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:34921:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55238 dst: /127.0.0.1:34921 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:01,690 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:56790 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:43531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56790 dst: /127.0.0.1:43531 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:48:01,692 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6bc02cff{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:48:01,693 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13c2a569{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:48:01,693 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:48:01,693 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e9a1cef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:48:01,693 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77a26ee8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir/,STOPPED} 2024-11-22T03:48:01,694 WARN [BP-1396528010-172.17.0.2-1732247267310 heartbeating to localhost/127.0.0.1:43749 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:48:01,694 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T03:48:01,694 WARN [BP-1396528010-172.17.0.2-1732247267310 heartbeating to localhost/127.0.0.1:43749 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1396528010-172.17.0.2-1732247267310 (Datanode Uuid a3474c82-49cd-4f65-a18b-576ee48dd28c) service to localhost/127.0.0.1:43749 2024-11-22T03:48:01,694 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:48:01,695 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data9/current/BP-1396528010-172.17.0.2-1732247267310 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:48:01,695 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data10/current/BP-1396528010-172.17.0.2-1732247267310 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:48:01,695 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:48:02,031 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:02,990 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:03,685 WARN [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]] 2024-11-22T03:48:03,685 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:03,685 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c85114ed5096%2C35197%2C1732247267957:(num 1732247279670) roll requested 2024-11-22T03:48:03,686 INFO [regionserver/c85114ed5096:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C35197%2C1732247267957.1732247283686 2024-11-22T03:48:03,689 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:03,689 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]) is bad. 2024-11-22T03:48:03,689 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741840_1022 2024-11-22T03:48:03,689 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK] 2024-11-22T03:48:03,690 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 after 4007ms 2024-11-22T03:48:03,692 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34921 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:03,692 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:56814 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data8]'}, localName='127.0.0.1:43531', datanodeUuid='4cf6bb88-a7d6-444d-b436-b07a5f9b11ad', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741841_1023 to mirror 127.0.0.1:34921 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:03,692 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK], DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]) is bad. 2024-11-22T03:48:03,692 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741841_1023 2024-11-22T03:48:03,692 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:56814 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T03:48:03,693 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:56814 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:43531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56814 dst: /127.0.0.1:43531 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:03,693 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK] 2024-11-22T03:48:03,694 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:03,694 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK], DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]) is bad. 2024-11-22T03:48:03,694 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741842_1024 2024-11-22T03:48:03,695 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK] 2024-11-22T03:48:03,699 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:03,699 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:03,699 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:03,699 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:03,700 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:03,700 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247279670 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247283686 2024-11-22T03:48:03,700 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T03:48:03,701 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37117:37117),(127.0.0.1/127.0.0.1:40197:40197)] 2024-11-22T03:48:03,701 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 is not closed yet, will try archiving it next time 2024-11-22T03:48:03,701 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247279670 is not closed yet, will try archiving it next time 2024-11-22T03:48:03,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741839_1021 (size=2431) 2024-11-22T03:48:04,031 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:04,104 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 is not closed yet, will try archiving it next time 2024-11-22T03:48:04,991 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:05,701 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:05,704 WARN [ResponseProcessor for block BP-1396528010-172.17.0.2-1732247267310:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1396528010-172.17.0.2-1732247267310:blk_1073741843_1025 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:48:05,705 WARN [DataStreamer for file /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247283686 block BP-1396528010-172.17.0.2-1732247267310:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 2024-11-22T03:48:05,706 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:56828 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:43531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56828 dst: /127.0.0.1:43531 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:48:05,707 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:36670 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:42385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36670 dst: /127.0.0.1:42385 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:05,708 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@23995e63{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:48:05,709 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7860ae0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:48:05,709 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:48:05,709 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6796b4ca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:48:05,709 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ab3e732{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir/,STOPPED} 2024-11-22T03:48:05,709 WARN [BP-1396528010-172.17.0.2-1732247267310 heartbeating to localhost/127.0.0.1:43749 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:48:05,710 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:48:05,710 WARN [BP-1396528010-172.17.0.2-1732247267310 heartbeating to localhost/127.0.0.1:43749 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1396528010-172.17.0.2-1732247267310 (Datanode Uuid 4cf6bb88-a7d6-444d-b436-b07a5f9b11ad) service to localhost/127.0.0.1:43749 2024-11-22T03:48:05,710 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:48:05,710 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data7/current/BP-1396528010-172.17.0.2-1732247267310 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:48:05,710 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data8/current/BP-1396528010-172.17.0.2-1732247267310 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:48:05,711 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:48:05,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35197 {}] regionserver.HRegion(8855): Flush requested on 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:48:05,719 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6fd9fc11d70fd9a0496bcc8ab732b0af 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:48:05,742 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/af8ccb4964bf42cc9c5d12ee5894ef47 is 1080, key is row0002/info:/1732247281697/Put/seqid=0 2024-11-22T03:48:05,743 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:48:05,743 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]) is bad. 2024-11-22T03:48:05,743 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741844_1027 2024-11-22T03:48:05,744 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK] 2024-11-22T03:48:05,746 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43531 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:05,746 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:36700 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data6]'}, localName='127.0.0.1:42385', datanodeUuid='f8752521-28ed-48c6-b362-b2f1a86d4921', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741845_1028 to mirror 127.0.0.1:43531 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:48:05,746 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK], DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 2024-11-22T03:48:05,746 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741845_1028 2024-11-22T03:48:05,746 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:36700 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:48:05,746 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:36700 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:42385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36700 dst: /127.0.0.1:42385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:05,747 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:05,748 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:48:05,748 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]) is bad. 2024-11-22T03:48:05,748 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741846_1029 2024-11-22T03:48:05,749 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK] 2024-11-22T03:48:05,750 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:05,750 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]) is bad. 
2024-11-22T03:48:05,750 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741847_1030 2024-11-22T03:48:05,751 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK] 2024-11-22T03:48:05,751 WARN [IPC Server handler 2 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:48:05,751 WARN [IPC Server handler 2 on default port 43749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:48:05,751 WARN [IPC Server handler 2 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:48:05,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741848_1031 (size=10347) 2024-11-22T03:48:06,032 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:48:06,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/af8ccb4964bf42cc9c5d12ee5894ef47 2024-11-22T03:48:06,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/af8ccb4964bf42cc9c5d12ee5894ef47 as hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/af8ccb4964bf42cc9c5d12ee5894ef47 2024-11-22T03:48:06,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/af8ccb4964bf42cc9c5d12ee5894ef47, entries=5, sequenceid=11, filesize=10.1 K 2024-11-22T03:48:06,177 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 6fd9fc11d70fd9a0496bcc8ab732b0af in 457ms, sequenceid=11, compaction requested=false 2024-11-22T03:48:06,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6fd9fc11d70fd9a0496bcc8ab732b0af: 2024-11-22T03:48:06,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35197 {}] regionserver.HRegion(8855): Flush requested on 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:48:06,349 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6fd9fc11d70fd9a0496bcc8ab732b0af 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-22T03:48:06,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/bbdaa9a81dd14d3aa015d29a6da1661c is 1080, key is row0007/info:/1732247285720/Put/seqid=0 2024-11-22T03:48:06,356 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:48:06,356 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK], DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]) is bad. 2024-11-22T03:48:06,356 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741849_1032 2024-11-22T03:48:06,357 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK] 2024-11-22T03:48:06,358 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:06,358 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK], DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]) is bad. 2024-11-22T03:48:06,358 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741850_1033 2024-11-22T03:48:06,359 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK] 2024-11-22T03:48:06,361 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34921 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
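The repeated Abandoning/Excluding cycle above is the DFS client trying every pipeline it is offered and giving up on each one because the stopped datanodes refuse connections. A minimal sketch, assuming the standard HDFS client keys (these values are not read from the test's own configuration), of the client-side knobs that decide whether such a degraded pipeline aborts or keeps writing:

    import org.apache.hadoop.conf.Configuration;

    public class LenientPipelineConf {
        // Returns a client Configuration that keeps writing on a shrinking pipeline
        // instead of aborting with "All datanodes ... are bad".
        public static Configuration create() {
            Configuration conf = new Configuration();
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            // Best effort: if no replacement datanode can be found, carry on with the
            // datanodes that are left rather than failing the stream.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            return conf;
        }
    }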
2024-11-22T03:48:06,361 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:36748 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data6]'}, localName='127.0.0.1:42385', datanodeUuid='f8752521-28ed-48c6-b362-b2f1a86d4921', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741851_1034 to mirror 127.0.0.1:34921 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:06,361 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK], DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]) is bad. 2024-11-22T03:48:06,361 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741851_1034 2024-11-22T03:48:06,361 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:36748 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:48:06,361 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:36748 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:42385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36748 dst: /127.0.0.1:42385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:06,362 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK] 2024-11-22T03:48:06,363 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:06,364 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 
2024-11-22T03:48:06,364 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741852_1035 2024-11-22T03:48:06,364 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:06,365 WARN [IPC Server handler 4 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:48:06,365 WARN [IPC Server handler 4 on default port 43749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:48:06,365 WARN [IPC Server handler 4 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:48:06,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741853_1036 (size=12506) 2024-11-22T03:48:06,769 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/bbdaa9a81dd14d3aa015d29a6da1661c 2024-11-22T03:48:06,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/bbdaa9a81dd14d3aa015d29a6da1661c as hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/bbdaa9a81dd14d3aa015d29a6da1661c 2024-11-22T03:48:06,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/bbdaa9a81dd14d3aa015d29a6da1661c, entries=7, sequenceid=24, filesize=12.2 K 2024-11-22T03:48:06,784 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 6fd9fc11d70fd9a0496bcc8ab732b0af in 434ms, sequenceid=24, compaction requested=false 2024-11-22T03:48:06,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
6fd9fc11d70fd9a0496bcc8ab732b0af: 2024-11-22T03:48:06,784 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-22T03:48:06,784 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:48:06,784 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/bbdaa9a81dd14d3aa015d29a6da1661c because midkey is the same as first or last row 2024-11-22T03:48:06,992 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:07,702 WARN [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]] 2024-11-22T03:48:07,702 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:07,702 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c85114ed5096%2C35197%2C1732247267957:(num 1732247283686) roll requested 2024-11-22T03:48:07,703 INFO [regionserver/c85114ed5096:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C35197%2C1732247267957.1732247287703 2024-11-22T03:48:07,710 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:07,710 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]) is bad. 2024-11-22T03:48:07,711 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741854_1037 2024-11-22T03:48:07,711 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK] 2024-11-22T03:48:07,713 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:07,713 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK], DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]) is bad. 2024-11-22T03:48:07,713 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741855_1038 2024-11-22T03:48:07,714 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK] 2024-11-22T03:48:07,715 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:07,715 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]) is bad. 2024-11-22T03:48:07,716 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741856_1039 2024-11-22T03:48:07,716 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK] 2024-11-22T03:48:07,718 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:07,718 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 
2024-11-22T03:48:07,718 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741857_1040 2024-11-22T03:48:07,718 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:07,719 WARN [IPC Server handler 0 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:48:07,719 WARN [IPC Server handler 0 on default port 43749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:48:07,719 WARN [IPC Server handler 0 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:48:07,722 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:07,722 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:07,722 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:07,723 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:07,723 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:07,723 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247283686 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247287703 2024-11-22T03:48:07,724 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40197:40197)] 2024-11-22T03:48:07,724 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 is not closed yet, will try archiving it next time 2024-11-22T03:48:07,724 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247283686 is not closed yet, will try archiving it next time 2024-11-22T03:48:07,724 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247279670 to hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/oldWALs/c85114ed5096%2C35197%2C1732247267957.1732247279670 2024-11-22T03:48:07,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741843_1026 (size=25992) 2024-11-22T03:48:07,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35197 {}] regionserver.HRegion(8855): Flush requested on 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:48:07,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6fd9fc11d70fd9a0496bcc8ab732b0af 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T03:48:07,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/90b5e45180b14cc59a6264ce5af3be26 is 1079, key is tmprow/info:/1732247287775/Put/seqid=0 2024-11-22T03:48:07,783 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:07,783 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK], DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]) is bad. 2024-11-22T03:48:07,783 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741859_1042 2024-11-22T03:48:07,784 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK] 2024-11-22T03:48:07,785 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:07,785 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]) is bad. 2024-11-22T03:48:07,785 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741860_1043 2024-11-22T03:48:07,786 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK] 2024-11-22T03:48:07,787 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:07,788 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 2024-11-22T03:48:07,788 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741861_1044 2024-11-22T03:48:07,788 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:07,790 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41395 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:07,790 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:36764 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data6]'}, localName='127.0.0.1:42385', datanodeUuid='f8752521-28ed-48c6-b362-b2f1a86d4921', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741862_1045 to mirror 127.0.0.1:41395 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:07,790 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK], DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]) is bad. 2024-11-22T03:48:07,790 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741862_1045 2024-11-22T03:48:07,790 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:36764 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
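For the WAL roll requested at 03:48:07,702 ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL"), the thresholds involved are FSHLog settings; the property names in the sketch below are recalled assumptions about that code path, not values read from this run, so treat them as hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollTuning {
        // Sketch only; both keys are assumed, not confirmed against this test's config.
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // Replica count below which the log roller treats the WAL pipeline as degraded.
            conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
            // Upper bound on consecutive low-replication-triggered rolls before the
            // region server stops forcing new WAL files.
            conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
            return conf;
        }
    }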
2024-11-22T03:48:07,791 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:36764 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:42385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36764 dst: /127.0.0.1:42385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:07,791 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK] 2024-11-22T03:48:07,792 WARN [IPC Server handler 3 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:48:07,792 WARN [IPC Server handler 3 on default port 43749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:48:07,792 WARN [IPC Server handler 3 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:48:07,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741863_1046 (size=6027) 2024-11-22T03:48:08,032 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:08,127 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 is not closed yet, will try archiving it next time 2024-11-22T03:48:08,196 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/90b5e45180b14cc59a6264ce5af3be26 2024-11-22T03:48:08,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/90b5e45180b14cc59a6264ce5af3be26 as hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/90b5e45180b14cc59a6264ce5af3be26 2024-11-22T03:48:08,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/90b5e45180b14cc59a6264ce5af3be26, entries=1, sequenceid=34, filesize=5.9 K 2024-11-22T03:48:08,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 6fd9fc11d70fd9a0496bcc8ab732b0af in 434ms, sequenceid=34, compaction requested=true 2024-11-22T03:48:08,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6fd9fc11d70fd9a0496bcc8ab732b0af: 2024-11-22T03:48:08,210 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-22T03:48:08,210 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:48:08,210 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/bbdaa9a81dd14d3aa015d29a6da1661c because midkey is the same as first or last row 2024-11-22T03:48:08,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fd9fc11d70fd9a0496bcc8ab732b0af:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:48:08,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-11-22T03:48:08,211 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:48:08,212 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:48:08,212 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.HStore(1541): 6fd9fc11d70fd9a0496bcc8ab732b0af/info is initiating minor compaction (all files) 2024-11-22T03:48:08,212 INFO [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6fd9fc11d70fd9a0496bcc8ab732b0af/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 2024-11-22T03:48:08,212 INFO [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/af8ccb4964bf42cc9c5d12ee5894ef47, hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/bbdaa9a81dd14d3aa015d29a6da1661c, hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/90b5e45180b14cc59a6264ce5af3be26] into tmpdir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp, totalSize=28.2 K 2024-11-22T03:48:08,213 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] compactions.Compactor(225): Compacting af8ccb4964bf42cc9c5d12ee5894ef47, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732247281697 2024-11-22T03:48:08,213 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] compactions.Compactor(225): Compacting bbdaa9a81dd14d3aa015d29a6da1661c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732247285720 2024-11-22T03:48:08,214 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] compactions.Compactor(225): Compacting 90b5e45180b14cc59a6264ce5af3be26, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732247287775 2024-11-22T03:48:08,228 INFO [RS:0;c85114ed5096:35197-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fd9fc11d70fd9a0496bcc8ab732b0af#info#compaction#21 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:48:08,228 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/2baa085778b24d22a19bb6189547075e is 1080, key is row0002/info:/1732247281697/Put/seqid=0 2024-11-22T03:48:08,230 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:08,230 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK], DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 2024-11-22T03:48:08,230 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741864_1047 2024-11-22T03:48:08,231 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:08,232 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:48:08,232 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK], DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]) is bad. 2024-11-22T03:48:08,232 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741865_1048 2024-11-22T03:48:08,232 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK] 2024-11-22T03:48:08,233 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:08,233 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK], DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]) is bad. 2024-11-22T03:48:08,233 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741866_1049 2024-11-22T03:48:08,234 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK] 2024-11-22T03:48:08,235 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:08,235 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]) is bad. 2024-11-22T03:48:08,235 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741867_1050 2024-11-22T03:48:08,235 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK] 2024-11-22T03:48:08,236 WARN [IPC Server handler 2 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:48:08,236 WARN [IPC Server handler 2 on default port 43749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:48:08,236 WARN [IPC Server handler 2 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:48:08,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741868_1051 (size=17994) 2024-11-22T03:48:08,647 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/2baa085778b24d22a19bb6189547075e as hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/2baa085778b24d22a19bb6189547075e 2024-11-22T03:48:08,657 INFO [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6fd9fc11d70fd9a0496bcc8ab732b0af/info of 6fd9fc11d70fd9a0496bcc8ab732b0af into 2baa085778b24d22a19bb6189547075e(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
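A quick size check against the figures logged earlier: the three compaction inputs line up with block sizes 10,347 bytes (af8ccb4964bf42cc9c5d12ee5894ef47, reported as 10.1 K), 12,506 bytes (bbdaa9a81dd14d3aa015d29a6da1661c, 12.2 K) and 6,027 bytes (90b5e45180b14cc59a6264ce5af3be26, 5.9 K), which sum to exactly the 28,880 bytes the ExploringCompactionPolicy says it selected (totalSize=28.2 K). The single rewritten file 2baa085778b24d22a19bb6189547075e comes to 17,994 bytes ≈ 17.6 K, the store size the split policy then evaluates against sizeToCheck=16.0 K in the entries that follow.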
2024-11-22T03:48:08,657 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6fd9fc11d70fd9a0496bcc8ab732b0af: 2024-11-22T03:48:08,657 INFO [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af., storeName=6fd9fc11d70fd9a0496bcc8ab732b0af/info, priority=13, startTime=1732247288210; duration=0sec 2024-11-22T03:48:08,657 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-22T03:48:08,657 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:48:08,658 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/2baa085778b24d22a19bb6189547075e because midkey is the same as first or last row 2024-11-22T03:48:08,658 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-22T03:48:08,658 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:48:08,658 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/2baa085778b24d22a19bb6189547075e because midkey is the same as first or last row 2024-11-22T03:48:08,658 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-22T03:48:08,658 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:48:08,658 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/2baa085778b24d22a19bb6189547075e because midkey is the same as first or last row 2024-11-22T03:48:08,658 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:48:08,658 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fd9fc11d70fd9a0496bcc8ab732b0af:info 2024-11-22T03:48:08,992 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:09,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35197 {}] regionserver.HRegion(8855): Flush requested on 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:48:09,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6fd9fc11d70fd9a0496bcc8ab732b0af 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T03:48:09,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/ed8c02d319e242e1a946bfb4552e34a2 is 1079, key is tmprow/info:/1732247289207/Put/seqid=0 2024-11-22T03:48:09,219 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41395 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:09,219 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49110 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data6]'}, localName='127.0.0.1:42385', datanodeUuid='f8752521-28ed-48c6-b362-b2f1a86d4921', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741869_1052 to mirror 127.0.0.1:41395 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:09,219 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK], DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]) is bad. 2024-11-22T03:48:09,219 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741869_1052 2024-11-22T03:48:09,219 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49110 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:48:09,220 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49110 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:42385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49110 dst: /127.0.0.1:42385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:09,220 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK] 2024-11-22T03:48:09,222 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:09,222 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK], DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]) is bad. 2024-11-22T03:48:09,222 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741870_1053 2024-11-22T03:48:09,223 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK] 2024-11-22T03:48:09,226 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34921 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:09,226 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49116 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data6]'}, localName='127.0.0.1:42385', datanodeUuid='f8752521-28ed-48c6-b362-b2f1a86d4921', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741871_1054 to mirror 127.0.0.1:34921 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:09,226 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK], DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]) is bad. 2024-11-22T03:48:09,226 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741871_1054 2024-11-22T03:48:09,226 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49116 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:48:09,226 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49116 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:42385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49116 dst: /127.0.0.1:42385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:09,227 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK] 2024-11-22T03:48:09,229 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43531 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:09,229 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49124 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data6]'}, localName='127.0.0.1:42385', datanodeUuid='f8752521-28ed-48c6-b362-b2f1a86d4921', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741872_1055 to mirror 127.0.0.1:43531 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:09,229 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK], DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 2024-11-22T03:48:09,229 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741872_1055 2024-11-22T03:48:09,229 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49124 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:48:09,230 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49124 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:42385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49124 dst: /127.0.0.1:42385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:09,230 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:09,231 WARN [IPC Server handler 1 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:48:09,231 WARN [IPC Server handler 1 on default port 43749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:48:09,231 WARN [IPC Server handler 1 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:48:09,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741873_1056 (size=6027) 2024-11-22T03:48:09,346 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40b88336[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42385, datanodeUuid=f8752521-28ed-48c6-b362-b2f1a86d4921, infoPort=40197, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310):Failed to transfer BP-1396528010-172.17.0.2-1732247267310:blk_1073741848_1031 to 127.0.0.1:34635 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:09,346 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5c52b36a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42385, datanodeUuid=f8752521-28ed-48c6-b362-b2f1a86d4921, infoPort=40197, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310):Failed to transfer BP-1396528010-172.17.0.2-1732247267310:blk_1073741853_1036 to 127.0.0.1:43531 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:48:09,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/ed8c02d319e242e1a946bfb4552e34a2 2024-11-22T03:48:09,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/ed8c02d319e242e1a946bfb4552e34a2 as hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/ed8c02d319e242e1a946bfb4552e34a2 2024-11-22T03:48:09,656 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/ed8c02d319e242e1a946bfb4552e34a2, entries=1, sequenceid=45, filesize=5.9 K 2024-11-22T03:48:09,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 6fd9fc11d70fd9a0496bcc8ab732b0af in 448ms, sequenceid=45, compaction requested=false 2024-11-22T03:48:09,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6fd9fc11d70fd9a0496bcc8ab732b0af: 2024-11-22T03:48:09,658 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-22T03:48:09,658 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:48:09,658 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/2baa085778b24d22a19bb6189547075e because midkey is the same as first or last row 2024-11-22T03:48:09,724 WARN [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]] 2024-11-22T03:48:09,724 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:48:09,725 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c85114ed5096%2C35197%2C1732247267957:(num 1732247287703) roll requested 2024-11-22T03:48:09,725 INFO [regionserver/c85114ed5096:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C35197%2C1732247267957.1732247289725 2024-11-22T03:48:09,729 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34635 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:09,729 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49144 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data6]'}, localName='127.0.0.1:42385', datanodeUuid='f8752521-28ed-48c6-b362-b2f1a86d4921', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741874_1057 to mirror 127.0.0.1:34635 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:09,729 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK], DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]) is bad. 
2024-11-22T03:48:09,729 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741874_1057 2024-11-22T03:48:09,729 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49144 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T03:48:09,729 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49144 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:42385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49144 dst: /127.0.0.1:42385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:09,730 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK] 2024-11-22T03:48:09,731 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:09,731 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK], DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 
2024-11-22T03:48:09,731 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741875_1058 2024-11-22T03:48:09,731 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:09,733 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41395 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:09,733 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49152 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data6]'}, localName='127.0.0.1:42385', datanodeUuid='f8752521-28ed-48c6-b362-b2f1a86d4921', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741876_1059 to mirror 127.0.0.1:41395 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:09,734 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK], DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]) is bad. 
2024-11-22T03:48:09,734 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49152 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T03:48:09,734 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741876_1059 2024-11-22T03:48:09,734 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49152 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:42385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49152 dst: /127.0.0.1:42385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:09,734 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK] 2024-11-22T03:48:09,735 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:09,736 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]) is bad. 
2024-11-22T03:48:09,736 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741877_1060 2024-11-22T03:48:09,736 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK] 2024-11-22T03:48:09,737 WARN [IPC Server handler 1 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:48:09,737 WARN [IPC Server handler 1 on default port 43749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:48:09,737 WARN [IPC Server handler 1 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:48:09,740 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:09,740 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:09,740 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:09,740 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:09,740 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:09,741 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247287703 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247289725 2024-11-22T03:48:09,741 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40197:40197)] 2024-11-22T03:48:09,741 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 is not closed yet, will try archiving it next time 2024-11-22T03:48:09,741 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247287703 is not closed yet, will try archiving it next time 2024-11-22T03:48:09,742 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247283686 to hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/oldWALs/c85114ed5096%2C35197%2C1732247267957.1732247283686 2024-11-22T03:48:09,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741858_1041 (size=13591) 2024-11-22T03:48:10,033 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:10,144 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 is not closed yet, will try archiving it next time 2024-11-22T03:48:10,334 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5c52b36a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42385, datanodeUuid=f8752521-28ed-48c6-b362-b2f1a86d4921, infoPort=40197, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310):Failed to transfer BP-1396528010-172.17.0.2-1732247267310:blk_1073741843_1026 to 127.0.0.1:43531 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:10,334 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40b88336[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42385, datanodeUuid=f8752521-28ed-48c6-b362-b2f1a86d4921, infoPort=40197, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310):Failed to transfer BP-1396528010-172.17.0.2-1732247267310:blk_1073741863_1046 to 127.0.0.1:41395 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:10,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35197 {}] regionserver.HRegion(8855): Flush requested on 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:48:10,645 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6fd9fc11d70fd9a0496bcc8ab732b0af 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T03:48:10,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/f8df5916ca99462db132e7a8bc8b2d84 is 1079, key is tmprow/info:/1732247290643/Put/seqid=0 2024-11-22T03:48:10,652 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:10,652 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK], DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]) is bad. 2024-11-22T03:48:10,652 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741879_1062 2024-11-22T03:48:10,653 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK] 2024-11-22T03:48:10,654 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:10,654 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK], DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 2024-11-22T03:48:10,654 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741880_1063 2024-11-22T03:48:10,655 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:10,656 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:10,656 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]) is bad. 2024-11-22T03:48:10,656 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741881_1064 2024-11-22T03:48:10,656 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK] 2024-11-22T03:48:10,658 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:10,658 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]) is bad. 2024-11-22T03:48:10,658 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741882_1065 2024-11-22T03:48:10,659 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK] 2024-11-22T03:48:10,659 WARN [IPC Server handler 0 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:48:10,659 WARN [IPC Server handler 0 on default port 43749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:48:10,659 WARN [IPC Server handler 0 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:48:10,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741883_1066 (size=6027) 2024-11-22T03:48:10,993 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:11,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/f8df5916ca99462db132e7a8bc8b2d84 2024-11-22T03:48:11,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/f8df5916ca99462db132e7a8bc8b2d84 as hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/f8df5916ca99462db132e7a8bc8b2d84 2024-11-22T03:48:11,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/f8df5916ca99462db132e7a8bc8b2d84, entries=1, sequenceid=55, filesize=5.9 K 2024-11-22T03:48:11,077 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 6fd9fc11d70fd9a0496bcc8ab732b0af in 433ms, sequenceid=55, compaction requested=true 2024-11-22T03:48:11,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6fd9fc11d70fd9a0496bcc8ab732b0af: 2024-11-22T03:48:11,077 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-22T03:48:11,077 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:48:11,077 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/2baa085778b24d22a19bb6189547075e because midkey is the same as first or last row 2024-11-22T03:48:11,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6fd9fc11d70fd9a0496bcc8ab732b0af:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:48:11,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:48:11,077 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:48:11,079 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:48:11,079 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.HStore(1541): 6fd9fc11d70fd9a0496bcc8ab732b0af/info is initiating minor compaction (all files) 2024-11-22T03:48:11,079 INFO [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6fd9fc11d70fd9a0496bcc8ab732b0af/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 2024-11-22T03:48:11,079 INFO [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/2baa085778b24d22a19bb6189547075e, hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/ed8c02d319e242e1a946bfb4552e34a2, hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/f8df5916ca99462db132e7a8bc8b2d84] into tmpdir=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp, totalSize=29.3 K 2024-11-22T03:48:11,079 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2baa085778b24d22a19bb6189547075e, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732247281697 2024-11-22T03:48:11,079 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] compactions.Compactor(225): Compacting ed8c02d319e242e1a946bfb4552e34a2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732247289207 2024-11-22T03:48:11,080 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] compactions.Compactor(225): Compacting f8df5916ca99462db132e7a8bc8b2d84, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732247290643 2024-11-22T03:48:11,099 INFO [RS:0;c85114ed5096:35197-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6fd9fc11d70fd9a0496bcc8ab732b0af#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:48:11,100 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/763501a5e687486fa59cb31320e271d7 is 1080, key is row0002/info:/1732247281697/Put/seqid=0 2024-11-22T03:48:11,101 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:11,102 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]) is bad. 2024-11-22T03:48:11,102 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741884_1067 2024-11-22T03:48:11,102 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK] 2024-11-22T03:48:11,104 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41395 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:11,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49172 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data6]'}, localName='127.0.0.1:42385', datanodeUuid='f8752521-28ed-48c6-b362-b2f1a86d4921', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741885_1068 to mirror 127.0.0.1:41395 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:11,105 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK], DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK]) is bad. 2024-11-22T03:48:11,105 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741885_1068 2024-11-22T03:48:11,105 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49172 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:48:11,105 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49172 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:42385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49172 dst: /127.0.0.1:42385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:11,105 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41395,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK] 2024-11-22T03:48:11,107 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:11,107 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]) is bad. 2024-11-22T03:48:11,107 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741886_1069 2024-11-22T03:48:11,108 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK] 2024-11-22T03:48:11,113 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43531 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:11,113 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49180 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741887_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data6]'}, localName='127.0.0.1:42385', datanodeUuid='f8752521-28ed-48c6-b362-b2f1a86d4921', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741887_1070 to mirror 127.0.0.1:43531 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:11,113 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK], DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 2024-11-22T03:48:11,113 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49180 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741887_1070] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:48:11,113 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741887_1070 2024-11-22T03:48:11,113 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:49180 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741887_1070] {}] datanode.DataXceiver(331): 127.0.0.1:42385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49180 dst: /127.0.0.1:42385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
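The repeated "Connection refused" / "Abandoning" / "Excluding datanode" sequence above is the HDFS client walking through candidate datanodes for a write pipeline, dropping each node it cannot reach and retrying with the remainder until none are left ("All datanodes ... are bad. Aborting..."). The sketch below shows that exclude-and-retry pattern in plain Java; it is an illustration only, not the Hadoop DataStreamer code, and the loopback ports are copied from the log purely as placeholders.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.ArrayList;
import java.util.List;

// Illustrative sketch only (not the Hadoop DataStreamer): keep a list of
// excluded nodes and retry pipeline setup against the remaining candidates,
// mirroring the "Excluding datanode ..." / "Abandoning ..." entries above.
public class PipelineRetrySketch {

    // Try to open a TCP connection to each candidate until one succeeds.
    static InetSocketAddress connectToFirstHealthy(List<InetSocketAddress> candidates,
                                                   List<InetSocketAddress> excluded,
                                                   int timeoutMillis) throws IOException {
        for (InetSocketAddress node : candidates) {
            if (excluded.contains(node)) {
                continue; // already marked bad by an earlier attempt
            }
            try (Socket socket = new Socket()) {
                socket.connect(node, timeoutMillis);
                return node; // pipeline head established
            } catch (IOException e) {
                excluded.add(node); // the equivalent of "datanode X is bad"
            }
        }
        throw new IOException("All candidate datanodes are bad. Aborting...");
    }

    public static void main(String[] args) {
        // Loopback ports taken from the log above, used here only as placeholders.
        List<InetSocketAddress> candidates = new ArrayList<>(List.of(
                new InetSocketAddress("127.0.0.1", 43531),
                new InetSocketAddress("127.0.0.1", 34921),
                new InetSocketAddress("127.0.0.1", 42385)));
        List<InetSocketAddress> excluded = new ArrayList<>();
        try {
            System.out.println("Pipeline established with "
                    + connectToFirstHealthy(candidates, excluded, 3000));
        } catch (IOException e) {
            // With nothing listening on these ports, every node is excluded and we end up here.
            System.out.println(e.getMessage() + " excluded=" + excluded);
        }
    }
}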
2024-11-22T03:48:11,114 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:11,114 WARN [IPC Server handler 1 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T03:48:11,114 WARN [IPC Server handler 1 on default port 43749 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T03:48:11,114 WARN [IPC Server handler 1 on default port 43749 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T03:48:11,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741888_1071 (size=18097) 2024-11-22T03:48:11,527 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/763501a5e687486fa59cb31320e271d7 as hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/763501a5e687486fa59cb31320e271d7 2024-11-22T03:48:11,537 INFO [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6fd9fc11d70fd9a0496bcc8ab732b0af/info of 6fd9fc11d70fd9a0496bcc8ab732b0af into 763501a5e687486fa59cb31320e271d7(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
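The compaction entries above merge three store files of 17.6 K, 5.9 K and 5.9 K (29.3 K in total) into a single 17.7 K file. At its core a compaction is a merge of sorted key-value files; the sketch below illustrates that idea in plain Java, keeping only the newest cell per row where rows collide. It is a simplified model (real HBase compactions honour version counts, TTLs and delete markers), and the Cell record and sample data are hypothetical.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustrative sketch only, not the HBase compactor: merge several sorted
// store files into one, keeping the highest-sequence-id cell per row key.
public class CompactionSketch {

    record Cell(String rowKey, long sequenceId, String value) {}

    static List<Cell> compact(List<List<Cell>> storeFiles) {
        List<Cell> all = new ArrayList<>();
        storeFiles.forEach(all::addAll);
        // Sort by row key, then by descending sequence id so the newest cell comes first.
        all.sort(Comparator.comparing(Cell::rowKey)
                           .thenComparing(Comparator.comparingLong(Cell::sequenceId).reversed()));
        Map<String, Cell> newestPerRow = new LinkedHashMap<>();
        for (Cell c : all) {
            newestPerRow.putIfAbsent(c.rowKey(), c); // first seen = newest for that row
        }
        return new ArrayList<>(newestPerRow.values());
    }

    public static void main(String[] args) {
        // Hypothetical contents; sequence ids 34/45/55 echo the seqNum values in the log above.
        List<Cell> f1 = List.of(new Cell("row0002", 34, "v1"));
        List<Cell> f2 = List.of(new Cell("row0002", 45, "v2"));
        List<Cell> f3 = List.of(new Cell("row0003", 55, "v3"));
        // row0002 keeps the cell with sequence id 45; row0003 is carried over unchanged.
        System.out.println(compact(List.of(f1, f2, f3)));
    }
}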
2024-11-22T03:48:11,537 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6fd9fc11d70fd9a0496bcc8ab732b0af: 2024-11-22T03:48:11,537 INFO [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af., storeName=6fd9fc11d70fd9a0496bcc8ab732b0af/info, priority=13, startTime=1732247291077; duration=0sec 2024-11-22T03:48:11,537 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-22T03:48:11,537 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:48:11,537 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/763501a5e687486fa59cb31320e271d7 because midkey is the same as first or last row 2024-11-22T03:48:11,538 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-22T03:48:11,538 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:48:11,538 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/763501a5e687486fa59cb31320e271d7 because midkey is the same as first or last row 2024-11-22T03:48:11,538 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-22T03:48:11,538 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:48:11,538 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/763501a5e687486fa59cb31320e271d7 because midkey is the same as first or last row 2024-11-22T03:48:11,538 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:48:11,538 DEBUG [RS:0;c85114ed5096:35197-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6fd9fc11d70fd9a0496bcc8ab732b0af:info 2024-11-22T03:48:11,742 WARN [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
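The "Too many consecutive RollWriter requests" warning above is logged when roll attempts keep failing back-to-back because fewer datanodes are alive than the WAL needs for its pipeline, so each freshly requested writer fails the same way. Below is an illustrative counter-based guard in plain Java; the threshold, interface and names are hypothetical and this is not the FSHLog implementation.

import java.io.IOException;

// Illustrative sketch only: count consecutive failed WAL roll attempts and
// warn once a hypothetical threshold is crossed, mirroring the
// "Too many consecutive RollWriter requests" message in the log above.
public class RollWriterGuardSketch {

    // Stand-in for whatever creates a new WAL writer; may fail when too few datanodes are live.
    interface WriterFactory {
        AutoCloseable newWriter() throws Exception;
    }

    private static final int MAX_CONSECUTIVE_FAILURES = 5; // hypothetical threshold
    private int consecutiveFailures = 0;

    void requestRoll(WriterFactory factory) {
        try {
            AutoCloseable writer = factory.newWriter();
            writer.close();          // a real roller would keep using the new writer
            consecutiveFailures = 0; // a successful roll resets the counter
        } catch (Exception e) {
            consecutiveFailures++;
            if (consecutiveFailures >= MAX_CONSECUTIVE_FAILURES) {
                System.err.println("Too many consecutive RollWriter requests ("
                        + consecutiveFailures
                        + "); live datanodes may be below the tolerable replica count.");
            }
        }
    }

    public static void main(String[] args) {
        RollWriterGuardSketch guard = new RollWriterGuardSketch();
        // Simulate a factory that always fails, as when every pipeline datanode is down.
        for (int i = 0; i < 6; i++) {
            guard.requestRoll(() -> { throw new IOException("All datanodes are bad"); });
        }
    }
}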
2024-11-22T03:48:11,742 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:11,871 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:48:11,875 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:48:11,876 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:48:11,876 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:48:11,877 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:48:11,877 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d479cab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:48:11,878 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@664f3a1c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:48:11,983 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@90b14a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/java.io.tmpdir/jetty-localhost-41669-hadoop-hdfs-3_4_1-tests_jar-_-any-8245406701382273997/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:48:11,984 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@f8e3d4c{HTTP/1.1, (http/1.1)}{localhost:41669} 2024-11-22T03:48:11,984 INFO [Time-limited test {}] server.Server(415): Started @129311ms 2024-11-22T03:48:11,986 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:48:12,033 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:12,059 WARN [Thread-986 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:48:12,067 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5dc153a603471aab with lease ID 0x8ff38ce0f6701b1c: from storage DS-5ddb5d59-a71a-4043-9647-298809519964 node DatanodeRegistration(127.0.0.1:33491, datanodeUuid=4f278f4c-34ba-4fc6-8845-7d0a79f5af99, infoPort=40439, infoSecurePort=0, ipcPort=42067, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:48:12,067 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5dc153a603471aab with lease ID 0x8ff38ce0f6701b1c: from storage DS-aefb9566-241f-4bc5-91f0-a57efdf3aa4d node DatanodeRegistration(127.0.0.1:33491, datanodeUuid=4f278f4c-34ba-4fc6-8845-7d0a79f5af99, infoPort=40439, infoSecurePort=0, ipcPort=42067, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:48:12,331 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5c52b36a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42385, datanodeUuid=f8752521-28ed-48c6-b362-b2f1a86d4921, infoPort=40197, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310):Failed to transfer BP-1396528010-172.17.0.2-1732247267310:blk_1073741873_1056 to 127.0.0.1:34635 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:12,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741868_1051 (size=17994) 2024-11-22T03:48:12,993 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:13,333 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40b88336[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42385, datanodeUuid=f8752521-28ed-48c6-b362-b2f1a86d4921, infoPort=40197, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310):Failed to transfer BP-1396528010-172.17.0.2-1732247267310:blk_1073741858_1041 to 127.0.0.1:34921 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:13,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741883_1066 (size=6027) 2024-11-22T03:48:13,742 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:14,034 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:14,993 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:15,334 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5c52b36a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42385, datanodeUuid=f8752521-28ed-48c6-b362-b2f1a86d4921, infoPort=40197, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310):Failed to transfer BP-1396528010-172.17.0.2-1732247267310:blk_1073741888_1071 to 127.0.0.1:34921 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:15,743 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:16,035 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:16,994 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:17,744 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:17,889 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T03:48:18,035 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:18,126 ERROR [FSHLog-0-hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData-prefix:c85114ed5096,39239,1732247267914 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. 
Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:18,126 WARN [FSHLog-0-hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData-prefix:c85114ed5096,39239,1732247267914 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:18,127 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog c85114ed5096%2C39239%2C1732247267914:(num 1732247268051) roll requested 2024-11-22T03:48:18,128 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C39239%2C1732247267914.1732247298127 2024-11-22T03:48:18,133 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:18,133 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 
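After appendAndSync fails, the roller above requests a new WAL whose file name ends in the current epoch-millis timestamp (the .1732247298127 suffix matches the 03:48:18 wall-clock time), while the old file stays behind for archiving and lease recovery. The sketch below shows that timestamp-suffixed naming on a local filesystem using java.nio only; it is an illustration, not the HBase WAL code.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// Illustrative sketch only (local filesystem instead of HDFS): when appends to
// the current log fail, create a fresh, timestamp-suffixed log file and leave
// the old one in place for later archiving or lease recovery.
public class LogRollSketch {

    static Path rollLog(Path dir, String prefix) throws IOException {
        // e.g. "...1732247298127" in the log above is epoch milliseconds at roll time
        Path newLog = dir.resolve(prefix + "." + System.currentTimeMillis());
        return Files.createFile(newLog);
    }

    public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("wal-sketch");
        System.out.println("Rolled to new log: " + rollLog(dir, "master-store"));
    }
}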
2024-11-22T03:48:18,133 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741889_1072 2024-11-22T03:48:18,134 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:18,137 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:18,137 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741890_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK], DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK]) is bad. 2024-11-22T03:48:18,137 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741890_1073 2024-11-22T03:48:18,137 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34921,DS-67fd5dc9-f1a6-476b-b965-c614ada01e60,DISK] 2024-11-22T03:48:18,142 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:18,142 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:18,142 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:18,143 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:18,143 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:18,143 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/WALs/c85114ed5096,39239,1732247267914/c85114ed5096%2C39239%2C1732247267914.1732247268051 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/WALs/c85114ed5096,39239,1732247267914/c85114ed5096%2C39239%2C1732247267914.1732247298127 2024-11-22T03:48:18,143 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:18,144 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:18,144 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/WALs/c85114ed5096,39239,1732247267914/c85114ed5096%2C39239%2C1732247267914.1732247268051 2024-11-22T03:48:18,144 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40439:40439),(127.0.0.1/127.0.0.1:40197:40197)] 2024-11-22T03:48:18,144 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/WALs/c85114ed5096,39239,1732247267914/c85114ed5096%2C39239%2C1732247267914.1732247268051 is not closed yet, will try archiving it next time 2024-11-22T03:48:18,144 WARN [IPC Server handler 0 on default port 43749 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/WALs/c85114ed5096,39239,1732247267914/c85114ed5096%2C39239%2C1732247267914.1732247268051 has not been closed. Lease recovery is in progress. RecoveryId = 1075 for block blk_1073741830_1006 2024-11-22T03:48:18,145 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/WALs/c85114ed5096,39239,1732247267914/c85114ed5096%2C39239%2C1732247267914.1732247268051 after 1ms 2024-11-22T03:48:18,994 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:19,744 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:20,994 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:21,745 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:22,084 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1c4a05c1 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1396528010-172.17.0.2-1732247267310:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:34635,null,null]) java.net.ConnectException: Call From c85114ed5096/172.17.0.2 to localhost:34719 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] 
at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-22T03:48:22,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741833_1020 (size=455) 2024-11-22T03:48:22,147 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/WALs/c85114ed5096,39239,1732247267914/c85114ed5096%2C39239%2C1732247267914.1732247268051 after 4003ms 2024-11-22T03:48:22,719 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247268346 to hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/oldWALs/c85114ed5096%2C35197%2C1732247267957.1732247268346 2024-11-22T03:48:22,721 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247287703 to hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/oldWALs/c85114ed5096%2C35197%2C1732247267957.1732247287703 2024-11-22T03:48:22,995 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:23,746 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:24,063 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@304d854b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33491, datanodeUuid=4f278f4c-34ba-4fc6-8845-7d0a79f5af99, infoPort=40439, infoSecurePort=0, ipcPort=42067, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310):Failed to transfer BP-1396528010-172.17.0.2-1732247267310:blk_1073741836_1012 to 127.0.0.1:43531 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:24,063 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2d1c3c84[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33491, datanodeUuid=4f278f4c-34ba-4fc6-8845-7d0a79f5af99, infoPort=40439, infoSecurePort=0, ipcPort=42067, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310):Failed to transfer BP-1396528010-172.17.0.2-1732247267310:blk_1073741833_1020 to 127.0.0.1:43531 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:24,995 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:48:25,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:48:25,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:48:25,505 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C35197%2C1732247267957.1732247305504 2024-11-22T03:48:25,515 WARN [Thread-1021 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43531 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:25,514 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1353209707_22 at /127.0.0.1:48590 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741892_1076] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data4]'}, localName='127.0.0.1:33491', datanodeUuid='4f278f4c-34ba-4fc6-8845-7d0a79f5af99', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741892_1076 to mirror 127.0.0.1:43531 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:48:25,515 WARN [Thread-1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33491,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK], DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 2024-11-22T03:48:25,515 WARN [Thread-1021 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741892_1076 2024-11-22T03:48:25,515 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1353209707_22 at /127.0.0.1:48590 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741892_1076] {}] datanode.BlockReceiver(316): Block 1073741892 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T03:48:25,516 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1353209707_22 at /127.0.0.1:48590 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741892_1076] {}] datanode.DataXceiver(331): 127.0.0.1:33491:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48590 dst: /127.0.0.1:33491 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:48:25,516 WARN [Thread-1021 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:25,522 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,522 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,522 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,522 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,523 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,523 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247289725 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247305504 2024-11-22T03:48:25,524 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40197:40197),(127.0.0.1/127.0.0.1:40439:40439)] 2024-11-22T03:48:25,524 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247289725 is not closed yet, will try archiving it next time 2024-11-22T03:48:25,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741878_1061 (size=12911) 2024-11-22T03:48:25,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35197 {}] regionserver.HRegion(8855): Flush requested on 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:48:25,530 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6fd9fc11d70fd9a0496bcc8ab732b0af 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T03:48:25,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/80c5a84e435e432c94184ffa28777713 is 1080, key is row0013/info:/1732247305526/Put/seqid=0 2024-11-22T03:48:25,539 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:48600 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741894_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data4]'}, localName='127.0.0.1:33491', datanodeUuid='4f278f4c-34ba-4fc6-8845-7d0a79f5af99', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741894_1078 to mirror 127.0.0.1:43531 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:25,539 WARN [Thread-1028 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43531 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:25,539 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:48600 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741894_1078] {}] datanode.BlockReceiver(316): Block 1073741894 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:48:25,539 WARN [Thread-1028 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33491,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK], DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 2024-11-22T03:48:25,539 WARN [Thread-1028 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741894_1078 2024-11-22T03:48:25,539 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:48600 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741894_1078] {}] datanode.DataXceiver(331): 127.0.0.1:33491:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48600 dst: /127.0.0.1:33491 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:25,540 WARN [Thread-1028 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:25,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741895_1079 (size=8190) 2024-11-22T03:48:25,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741895_1079 (size=8190) 2024-11-22T03:48:25,551 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/80c5a84e435e432c94184ffa28777713 2024-11-22T03:48:25,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/80c5a84e435e432c94184ffa28777713 as hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/80c5a84e435e432c94184ffa28777713 2024-11-22T03:48:25,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/80c5a84e435e432c94184ffa28777713, entries=3, sequenceid=66, filesize=8.0 K 2024-11-22T03:48:25,565 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 6fd9fc11d70fd9a0496bcc8ab732b0af in 34ms, sequenceid=66, compaction requested=false 2024-11-22T03:48:25,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6fd9fc11d70fd9a0496bcc8ab732b0af: 2024-11-22T03:48:25,565 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-22T03:48:25,565 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:48:25,565 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/763501a5e687486fa59cb31320e271d7 because midkey is the same as first or last row 2024-11-22T03:48:25,746 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 
2024-11-22T03:48:25,746 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:25,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T03:48:25,754 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T03:48:25,754 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:48:25,754 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:48:25,755 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:48:25,755 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T03:48:25,756 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T03:48:25,756 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1858445601, stopped=false 2024-11-22T03:48:25,756 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c85114ed5096,39239,1732247267914 2024-11-22T03:48:25,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:48:25,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:48:25,758 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43241-0x100658b06a30002, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:48:25,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:25,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:25,759 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43241-0x100658b06a30002, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:25,759 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:48:25,759 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43241-0x100658b06a30002, quorum=127.0.0.1:57058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:48:25,759 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:48:25,759 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39239-0x100658b06a30000, 
quorum=127.0.0.1:57058, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:48:25,759 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T03:48:25,759 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:48:25,760 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:48:25,760 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c85114ed5096,35197,1732247267957' ***** 2024-11-22T03:48:25,760 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T03:48:25,760 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c85114ed5096,43241,1732247268913' ***** 2024-11-22T03:48:25,760 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T03:48:25,760 INFO [RS:1;c85114ed5096:43241 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T03:48:25,760 INFO [RS:0;c85114ed5096:35197 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T03:48:25,760 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T03:48:25,760 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T03:48:25,760 INFO [RS:1;c85114ed5096:43241 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T03:48:25,761 INFO [RS:0;c85114ed5096:35197 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T03:48:25,761 INFO [RS:1;c85114ed5096:43241 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T03:48:25,761 INFO [RS:0;c85114ed5096:35197 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T03:48:25,761 INFO [RS:1;c85114ed5096:43241 {}] regionserver.HRegionServer(959): stopping server c85114ed5096,43241,1732247268913 2024-11-22T03:48:25,761 INFO [RS:1;c85114ed5096:43241 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:48:25,761 INFO [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(3091): Received CLOSE for 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:48:25,761 INFO [RS:1;c85114ed5096:43241 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;c85114ed5096:43241. 
2024-11-22T03:48:25,761 DEBUG [RS:1;c85114ed5096:43241 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:48:25,761 DEBUG [RS:1;c85114ed5096:43241 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:48:25,761 INFO [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(959): stopping server c85114ed5096,35197,1732247267957 2024-11-22T03:48:25,761 INFO [RS:1;c85114ed5096:43241 {}] regionserver.HRegionServer(976): stopping server c85114ed5096,43241,1732247268913; all regions closed. 2024-11-22T03:48:25,761 INFO [RS:0;c85114ed5096:35197 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:48:25,761 INFO [RS:0;c85114ed5096:35197 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c85114ed5096:35197. 
2024-11-22T03:48:25,761 DEBUG [RS:0;c85114ed5096:35197 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:48:25,761 DEBUG [RS:0;c85114ed5096:35197 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:48:25,761 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6fd9fc11d70fd9a0496bcc8ab732b0af, disabling compactions & flushes 2024-11-22T03:48:25,761 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 2024-11-22T03:48:25,761 INFO [RS:0;c85114ed5096:35197 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T03:48:25,761 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 2024-11-22T03:48:25,761 INFO [RS:0;c85114ed5096:35197 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T03:48:25,761 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. after waiting 0 ms 2024-11-22T03:48:25,761 INFO [RS:0;c85114ed5096:35197 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T03:48:25,761 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 
2024-11-22T03:48:25,761 INFO [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T03:48:25,761 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 6fd9fc11d70fd9a0496bcc8ab732b0af 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-22T03:48:25,761 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,762 INFO [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T03:48:25,762 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:48:25,762 DEBUG [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(1325): Online Regions={6fd9fc11d70fd9a0496bcc8ab732b0af=TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af., 1588230740=hbase:meta,,1.1588230740} 2024-11-22T03:48:25,762 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,762 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:48:25,762 DEBUG [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6fd9fc11d70fd9a0496bcc8ab732b0af 2024-11-22T03:48:25,762 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:48:25,762 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,762 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:48:25,762 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:48:25,762 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,762 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,762 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-22T03:48:25,762 ERROR [FSHLog-0-hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360-prefix:c85114ed5096,35197,1732247267957.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:25,762 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:25,762 WARN [FSHLog-0-hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360-prefix:c85114ed5096,35197,1732247267957.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:25,762 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:25,763 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 2024-11-22T03:48:25,763 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c85114ed5096%2C35197%2C1732247267957.meta:.meta(num 1732247268744) roll requested 2024-11-22T03:48:25,763 INFO [regionserver/c85114ed5096:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C35197%2C1732247267957.meta.1732247305763.meta 2024-11-22T03:48:25,763 WARN [IPC Server handler 2 on default port 43749 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 has not been closed. Lease recovery is in progress. 
RecoveryId = 1080 for block blk_1073741837_1013 2024-11-22T03:48:25,763 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 after 0ms 2024-11-22T03:48:25,767 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/199990e98779412b9a7c2cbe6bf1e572 is 1080, key is row0015/info:/1732247305531/Put/seqid=0 2024-11-22T03:48:25,771 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,771 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,771 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,771 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,771 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,772 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247305763.meta 2024-11-22T03:48:25,772 WARN [Thread-1037 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1082 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43531 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:25,772 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:48636 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741897_1082] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data4]'}, localName='127.0.0.1:33491', datanodeUuid='4f278f4c-34ba-4fc6-8845-7d0a79f5af99', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741897_1082 to mirror 127.0.0.1:43531 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:25,772 WARN [Thread-1037 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741897_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33491,DS-5ddb5d59-a71a-4043-9647-298809519964,DISK], DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 2024-11-22T03:48:25,772 WARN [Thread-1037 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741897_1082 2024-11-22T03:48:25,772 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:25,772 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:48636 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741897_1082] {}] datanode.BlockReceiver(316): Block 1073741897 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:48:25,772 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34635,DS-6482e06d-ff00-4d4b-a4ce-37b475d487a2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:48:25,772 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta 2024-11-22T03:48:25,772 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:48636 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741897_1082] {}] datanode.DataXceiver(331): 127.0.0.1:33491:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48636 dst: /127.0.0.1:33491 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:25,773 WARN [Thread-1037 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:25,773 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40439:40439),(127.0.0.1/127.0.0.1:40197:40197)] 2024-11-22T03:48:25,773 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta is not closed yet, will try archiving it next time 2024-11-22T03:48:25,773 WARN [IPC Server handler 0 on default port 43749 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1083 for block blk_1073741834_1010 2024-11-22T03:48:25,773 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta after 1ms 2024-11-22T03:48:25,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741898_1084 (size=14660) 2024-11-22T03:48:25,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741898_1084 (size=14660) 2024-11-22T03:48:25,778 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/199990e98779412b9a7c2cbe6bf1e572 2024-11-22T03:48:25,786 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/.tmp/info/199990e98779412b9a7c2cbe6bf1e572 as hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/199990e98779412b9a7c2cbe6bf1e572 2024-11-22T03:48:25,792 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/199990e98779412b9a7c2cbe6bf1e572, entries=9, sequenceid=78, filesize=14.3 K 2024-11-22T03:48:25,794 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 6fd9fc11d70fd9a0496bcc8ab732b0af in 33ms, sequenceid=78, compaction requested=true 2024-11-22T03:48:25,794 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/af8ccb4964bf42cc9c5d12ee5894ef47, hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/bbdaa9a81dd14d3aa015d29a6da1661c, hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/2baa085778b24d22a19bb6189547075e, hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/90b5e45180b14cc59a6264ce5af3be26, 
hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/ed8c02d319e242e1a946bfb4552e34a2, hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/f8df5916ca99462db132e7a8bc8b2d84] to archive 2024-11-22T03:48:25,795 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T03:48:25,797 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/af8ccb4964bf42cc9c5d12ee5894ef47 to hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/af8ccb4964bf42cc9c5d12ee5894ef47 2024-11-22T03:48:25,799 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/bbdaa9a81dd14d3aa015d29a6da1661c to hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/bbdaa9a81dd14d3aa015d29a6da1661c 2024-11-22T03:48:25,799 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/.tmp/info/aa2ce72b79984f1a967bc4e9bcfa47ba is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af./info:regioninfo/1732247269807/Put/seqid=0 2024-11-22T03:48:25,800 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/2baa085778b24d22a19bb6189547075e to hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/2baa085778b24d22a19bb6189547075e 2024-11-22T03:48:25,802 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/90b5e45180b14cc59a6264ce5af3be26 to 
hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/90b5e45180b14cc59a6264ce5af3be26 2024-11-22T03:48:25,802 WARN [Thread-1050 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43531 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:25,802 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:35890 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741899_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data6]'}, localName='127.0.0.1:42385', datanodeUuid='f8752521-28ed-48c6-b362-b2f1a86d4921', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741899_1085 to mirror 127.0.0.1:43531 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:25,802 WARN [Thread-1050 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK], DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 
2024-11-22T03:48:25,802 WARN [Thread-1050 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741899_1085 2024-11-22T03:48:25,802 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:35890 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741899_1085] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:48:25,802 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:35890 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741899_1085] {}] datanode.DataXceiver(331): 127.0.0.1:42385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35890 dst: /127.0.0.1:42385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:25,803 WARN [Thread-1050 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:25,803 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/ed8c02d319e242e1a946bfb4552e34a2 to hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/ed8c02d319e242e1a946bfb4552e34a2 2024-11-22T03:48:25,805 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/f8df5916ca99462db132e7a8bc8b2d84 to hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/info/f8df5916ca99462db132e7a8bc8b2d84 2024-11-22T03:48:25,805 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c85114ed5096:39239 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-22T03:48:25,806 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [af8ccb4964bf42cc9c5d12ee5894ef47=10347, bbdaa9a81dd14d3aa015d29a6da1661c=12506, 2baa085778b24d22a19bb6189547075e=17994, 90b5e45180b14cc59a6264ce5af3be26=6027, ed8c02d319e242e1a946bfb4552e34a2=6027, f8df5916ca99462db132e7a8bc8b2d84=6027] 2024-11-22T03:48:25,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741900_1086 (size=7089) 2024-11-22T03:48:25,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741900_1086 (size=7089) 2024-11-22T03:48:25,812 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/.tmp/info/aa2ce72b79984f1a967bc4e9bcfa47ba 2024-11-22T03:48:25,814 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6fd9fc11d70fd9a0496bcc8ab732b0af/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-22T03:48:25,814 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 2024-11-22T03:48:25,815 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6fd9fc11d70fd9a0496bcc8ab732b0af: Waiting for close lock at 1732247305761Running coprocessor pre-close hooks at 1732247305761Disabling compacts and flushes for region at 1732247305761Disabling writes for close at 1732247305761Obtaining lock to block concurrent updates at 1732247305761Preparing flush snapshotting stores in 6fd9fc11d70fd9a0496bcc8ab732b0af at 1732247305761Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1732247305762 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 
at 1732247305762Flushing 6fd9fc11d70fd9a0496bcc8ab732b0af/info: creating writer at 1732247305763 (+1 ms)Flushing 6fd9fc11d70fd9a0496bcc8ab732b0af/info: appending metadata at 1732247305766 (+3 ms)Flushing 6fd9fc11d70fd9a0496bcc8ab732b0af/info: closing flushed file at 1732247305766Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f3eec10: reopening flushed file at 1732247305785 (+19 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 6fd9fc11d70fd9a0496bcc8ab732b0af in 33ms, sequenceid=78, compaction requested=true at 1732247305794 (+9 ms)Writing region close event to WAL at 1732247305809 (+15 ms)Running coprocessor post-close hooks at 1732247305814 (+5 ms)Closed at 1732247305814 2024-11-22T03:48:25,815 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732247269039.6fd9fc11d70fd9a0496bcc8ab732b0af. 2024-11-22T03:48:25,831 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/.tmp/ns/c0a244ce219140708219dcf40ef79963 is 43, key is default/ns:d/1732247268839/Put/seqid=0 2024-11-22T03:48:25,834 WARN [Thread-1058 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43531 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:25,834 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:35910 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741901_1087] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data6]'}, localName='127.0.0.1:42385', datanodeUuid='f8752521-28ed-48c6-b362-b2f1a86d4921', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741901_1087 to mirror 127.0.0.1:43531 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:25,834 WARN [Thread-1058 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK], DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 2024-11-22T03:48:25,834 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:35910 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741901_1087] {}] datanode.BlockReceiver(316): Block 1073741901 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:48:25,834 WARN [Thread-1058 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741901_1087 2024-11-22T03:48:25,835 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:35910 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741901_1087] {}] datanode.DataXceiver(331): 127.0.0.1:42385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35910 dst: /127.0.0.1:42385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:48:25,835 WARN [Thread-1058 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:25,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741902_1088 (size=5153) 2024-11-22T03:48:25,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741902_1088 (size=5153) 2024-11-22T03:48:25,841 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/.tmp/ns/c0a244ce219140708219dcf40ef79963 2024-11-22T03:48:25,860 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/.tmp/table/ce016c9e7727439381cdb4d97449bfbc is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732247269823/Put/seqid=0 2024-11-22T03:48:25,862 WARN [Thread-1065 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1089 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43531 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:25,862 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:35916 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741903_1089] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data6]'}, localName='127.0.0.1:42385', datanodeUuid='f8752521-28ed-48c6-b362-b2f1a86d4921', xmitsInProgress=0}:Exception transferring block BP-1396528010-172.17.0.2-1732247267310:blk_1073741903_1089 to mirror 127.0.0.1:43531 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:25,863 WARN [Thread-1065 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1396528010-172.17.0.2-1732247267310:blk_1073741903_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42385,DS-66fc4285-b115-478e-ae49-651241a2a320,DISK], DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK]) is bad. 2024-11-22T03:48:25,863 WARN [Thread-1065 {}] hdfs.DataStreamer(1850): Abandoning BP-1396528010-172.17.0.2-1732247267310:blk_1073741903_1089 2024-11-22T03:48:25,863 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:35916 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741903_1089] {}] datanode.BlockReceiver(316): Block 1073741903 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T03:48:25,863 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1351551173_22 at /127.0.0.1:35916 [Receiving block BP-1396528010-172.17.0.2-1732247267310:blk_1073741903_1089] {}] datanode.DataXceiver(331): 127.0.0.1:42385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35916 dst: /127.0.0.1:42385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
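The repeated "Exception in createBlockOutputStream" / "Error Recovery ... datanode 1 ... is bad" / "Excluding datanode" cycle above is the HDFS client rebuilding its write pipeline after the test kills the datanode at 127.0.0.1:43531. Purely as a hedged illustration of the client-side knobs involved (nothing in this log shows the test setting them), the stock Hadoop keys governing datanode replacement on pipeline failure can be tuned as below; the key names are standard HDFS client configuration, while the helper class and chosen values are assumptions:

```java
import org.apache.hadoop.conf.Configuration;

// Sketch only: with a small cluster (two live datanodes left), the DEFAULT
// replacement policy may find no substitute for the excluded node, so
// "best-effort" lets the write continue on the surviving pipeline instead
// of failing outright. Helper name and values are illustrative assumptions.
public class PipelineFailureTuning {
  public static Configuration tune(Configuration conf) {
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}
```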
2024-11-22T03:48:25,863 WARN [Thread-1065 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43531,DS-8d2d9fbd-baca-43f2-b19a-b6aac051826b,DISK] 2024-11-22T03:48:25,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741904_1090 (size=5424) 2024-11-22T03:48:25,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741904_1090 (size=5424) 2024-11-22T03:48:25,868 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/.tmp/table/ce016c9e7727439381cdb4d97449bfbc 2024-11-22T03:48:25,876 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/.tmp/info/aa2ce72b79984f1a967bc4e9bcfa47ba as hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/info/aa2ce72b79984f1a967bc4e9bcfa47ba 2024-11-22T03:48:25,881 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/info/aa2ce72b79984f1a967bc4e9bcfa47ba, entries=10, sequenceid=11, filesize=6.9 K 2024-11-22T03:48:25,882 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/.tmp/ns/c0a244ce219140708219dcf40ef79963 as hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/ns/c0a244ce219140708219dcf40ef79963 2024-11-22T03:48:25,888 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/ns/c0a244ce219140708219dcf40ef79963, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T03:48:25,889 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/.tmp/table/ce016c9e7727439381cdb4d97449bfbc as hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/table/ce016c9e7727439381cdb4d97449bfbc 2024-11-22T03:48:25,897 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/table/ce016c9e7727439381cdb4d97449bfbc, entries=2, sequenceid=11, filesize=5.3 K 2024-11-22T03:48:25,898 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, 
compaction requested=false 2024-11-22T03:48:25,905 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T03:48:25,905 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:48:25,905 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:48:25,906 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732247305762Running coprocessor pre-close hooks at 1732247305762Disabling compacts and flushes for region at 1732247305762Disabling writes for close at 1732247305762Obtaining lock to block concurrent updates at 1732247305762Preparing flush snapshotting stores in 1588230740 at 1732247305762Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732247305762Flushing stores of hbase:meta,,1.1588230740 at 1732247305781 (+19 ms)Flushing 1588230740/info: creating writer at 1732247305781Flushing 1588230740/info: appending metadata at 1732247305798 (+17 ms)Flushing 1588230740/info: closing flushed file at 1732247305798Flushing 1588230740/ns: creating writer at 1732247305818 (+20 ms)Flushing 1588230740/ns: appending metadata at 1732247305831 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1732247305831Flushing 1588230740/table: creating writer at 1732247305846 (+15 ms)Flushing 1588230740/table: appending metadata at 1732247305859 (+13 ms)Flushing 1588230740/table: closing flushed file at 1732247305859Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d0c2e3b: reopening flushed file at 1732247305875 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d88650c: reopening flushed file at 1732247305882 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d7de90d: reopening flushed file at 1732247305888 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false at 1732247305898 (+10 ms)Writing region close event to WAL at 1732247305901 (+3 ms)Running coprocessor post-close hooks at 1732247305905 (+4 ms)Closed at 1732247305905 2024-11-22T03:48:25,906 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T03:48:25,926 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.1732247289725 to hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/oldWALs/c85114ed5096%2C35197%2C1732247267957.1732247289725 2024-11-22T03:48:25,962 INFO [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(976): stopping server c85114ed5096,35197,1732247267957; all regions closed. 
2024-11-22T03:48:25,963 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,963 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,963 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,963 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,963 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:25,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741896_1081 (size=825) 2024-11-22T03:48:25,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741896_1081 (size=825) 2024-11-22T03:48:26,013 INFO [regionserver/c85114ed5096:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T03:48:26,013 INFO [regionserver/c85114ed5096:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T03:48:26,212 INFO [regionserver/c85114ed5096:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T03:48:26,212 INFO [regionserver/c85114ed5096:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T03:48:26,212 INFO [regionserver/c85114ed5096:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:48:26,999 INFO [regionserver/c85114ed5096:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:48:27,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:48:27,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:48:27,335 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5c52b36a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42385, datanodeUuid=f8752521-28ed-48c6-b362-b2f1a86d4921, infoPort=40197, infoSecurePort=0, ipcPort=44215, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310):Failed to transfer BP-1396528010-172.17.0.2-1732247267310:blk_1073741878_1061 to 127.0.0.1:43531 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:48:28,066 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@304d854b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33491, datanodeUuid=4f278f4c-34ba-4fc6-8845-7d0a79f5af99, infoPort=40439, infoSecurePort=0, ipcPort=42067, storageInfo=lv=-57;cid=testClusterID;nsid=1482075187;c=1732247267310):Failed to transfer BP-1396528010-172.17.0.2-1732247267310:blk_1073741827_1003 to 127.0.0.1:43531 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:28,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:48:28,749 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T03:48:28,751 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:48:28,752 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T03:48:28,861 INFO [master/c85114ed5096:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T03:48:28,861 INFO [master/c85114ed5096:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-22T03:48:29,765 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 after 4002ms 2024-11-22T03:48:29,775 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta after 4003ms 2024-11-22T03:48:30,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:48:30,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741835_1011 (size=393) 2024-11-22T03:48:30,763 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-22T03:48:30,770 DEBUG [RS:1;c85114ed5096:43241 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/oldWALs 2024-11-22T03:48:30,770 INFO [RS:1;c85114ed5096:43241 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c85114ed5096%2C43241%2C1732247268913:(num 1732247269140) 2024-11-22T03:48:30,770 DEBUG [RS:1;c85114ed5096:43241 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:48:30,770 INFO [RS:1;c85114ed5096:43241 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:48:30,770 INFO [RS:1;c85114ed5096:43241 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:48:30,771 INFO [RS:1;c85114ed5096:43241 {}] hbase.ChoreService(370): Chore service for: regionserver/c85114ed5096:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T03:48:30,772 INFO [RS:1;c85114ed5096:43241 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T03:48:30,772 INFO [RS:1;c85114ed5096:43241 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T03:48:30,772 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T03:48:30,772 INFO [RS:1;c85114ed5096:43241 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
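The RecoverLeaseFSUtils entries above show WAL lease recovery being retried: attempt=0 fails immediately ("after 0ms") and attempt=1 is logged roughly four seconds later once the NameNode has had time to finish recovery. A minimal sketch of that retry pattern, assuming plain DistributedFileSystem.recoverLease() with made-up helper and parameter names (this is not the HBase utility itself):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch: retry recoverLease() until the NameNode reports the file closed.
// The pause mirrors the ~4s gap between attempt=0 and attempt=1 in the log.
public class LeaseRecoveryRetry {
  public static boolean recoverWithRetries(Path walFile, Configuration conf,
      int maxAttempts, long pauseMs) throws Exception {
    FileSystem fs = walFile.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // non-HDFS filesystems need no lease recovery
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      // recoverLease() returns true once lease recovery has completed.
      if (dfs.recoverLease(walFile)) {
        return true;
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }
}
```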
2024-11-22T03:48:30,772 INFO [RS:1;c85114ed5096:43241 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:48:30,773 INFO [RS:1;c85114ed5096:43241 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43241 2024-11-22T03:48:30,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:48:30,775 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43241-0x100658b06a30002, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c85114ed5096,43241,1732247268913 2024-11-22T03:48:30,775 INFO [RS:1;c85114ed5096:43241 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:48:30,776 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c85114ed5096,43241,1732247268913] 2024-11-22T03:48:30,776 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c85114ed5096,43241,1732247268913 already deleted, retry=false 2024-11-22T03:48:30,777 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c85114ed5096,43241,1732247268913 expired; onlineServers=1 2024-11-22T03:48:30,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:30,829 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:30,829 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:30,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:30,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:30,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:30,838 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:30,838 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:30,876 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43241-0x100658b06a30002, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:48:30,876 INFO [RS:1;c85114ed5096:43241 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:48:30,876 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43241-0x100658b06a30002, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:48:30,876 INFO [RS:1;c85114ed5096:43241 {}] regionserver.HRegionServer(1031): Exiting; stopping=c85114ed5096,43241,1732247268913; zookeeper connection closed. 2024-11-22T03:48:30,876 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@684bb9cf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@684bb9cf 2024-11-22T03:48:30,964 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-22T03:48:30,967 DEBUG [RS:0;c85114ed5096:35197 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/oldWALs 2024-11-22T03:48:30,967 INFO [RS:0;c85114ed5096:35197 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c85114ed5096%2C35197%2C1732247267957.meta:.meta(num 1732247305763) 2024-11-22T03:48:30,967 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:30,967 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:30,967 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:30,968 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:30,968 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:30,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741893_1077 (size=14682) 2024-11-22T03:48:30,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741893_1077 (size=14682) 2024-11-22T03:48:30,973 DEBUG [RS:0;c85114ed5096:35197 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/oldWALs 2024-11-22T03:48:30,973 INFO [RS:0;c85114ed5096:35197 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c85114ed5096%2C35197%2C1732247267957:(num 1732247305504) 2024-11-22T03:48:30,973 DEBUG [RS:0;c85114ed5096:35197 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:48:30,973 INFO [RS:0;c85114ed5096:35197 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:48:30,973 INFO [RS:0;c85114ed5096:35197 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:48:30,973 INFO [RS:0;c85114ed5096:35197 {}] hbase.ChoreService(370): Chore service for: regionserver/c85114ed5096:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T03:48:30,973 INFO [RS:0;c85114ed5096:35197 {}] 
hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:48:30,974 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T03:48:30,974 INFO [RS:0;c85114ed5096:35197 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35197 2024-11-22T03:48:30,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c85114ed5096,35197,1732247267957 2024-11-22T03:48:30,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:48:30,975 INFO [RS:0;c85114ed5096:35197 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:48:30,976 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c85114ed5096,35197,1732247267957] 2024-11-22T03:48:30,977 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c85114ed5096,35197,1732247267957 already deleted, retry=false 2024-11-22T03:48:30,977 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c85114ed5096,35197,1732247267957 expired; onlineServers=0 2024-11-22T03:48:30,977 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c85114ed5096,39239,1732247267914' ***** 2024-11-22T03:48:30,977 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T03:48:30,977 INFO [M:0;c85114ed5096:39239 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:48:30,977 INFO [M:0;c85114ed5096:39239 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:48:30,977 DEBUG [M:0;c85114ed5096:39239 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T03:48:30,978 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
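Editor's note on the WAL-Shutdown-0 ERROR above ("We have waited 5 seconds but the close of async writer doesn't complete"): the message itself names the knob to turn, "hbase.wal.fshlog.wait.on.shutdown.seconds". A minimal sketch of raising it programmatically for a test run follows; the property name and the 5-second default come from the log line, while the helper class name is hypothetical and the use of setInt assumes the value is read as a whole number of seconds.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WalShutdownWaitTuning {
        // Builds a Configuration with a longer WAL shutdown wait; hand it to the
        // mini-cluster / testing utility before start-up so the WAL picks it up.
        static Configuration withLongerWalShutdownWait(int seconds) {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", seconds);
            return conf;
        }
    }

The same key can equally be set in hbase-site.xml; the code form is shown only because the surrounding run is a JUnit mini-cluster test.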
2024-11-22T03:48:30,978 DEBUG [M:0;c85114ed5096:39239 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T03:48:30,978 DEBUG [master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247268124 {}] cleaner.HFileCleaner(306): Exit Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247268124,5,FailOnTimeoutGroup] 2024-11-22T03:48:30,978 DEBUG [master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247268124 {}] cleaner.HFileCleaner(306): Exit Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247268124,5,FailOnTimeoutGroup] 2024-11-22T03:48:30,978 INFO [M:0;c85114ed5096:39239 {}] hbase.ChoreService(370): Chore service for: master/c85114ed5096:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T03:48:30,978 INFO [M:0;c85114ed5096:39239 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:48:30,978 DEBUG [M:0;c85114ed5096:39239 {}] master.HMaster(1795): Stopping service threads 2024-11-22T03:48:30,978 INFO [M:0;c85114ed5096:39239 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T03:48:30,978 INFO [M:0;c85114ed5096:39239 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:48:30,979 INFO [M:0;c85114ed5096:39239 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T03:48:30,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T03:48:30,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:30,979 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-22T03:48:30,979 DEBUG [M:0;c85114ed5096:39239 {}] zookeeper.ZKUtil(347): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T03:48:30,979 WARN [M:0;c85114ed5096:39239 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T03:48:30,980 INFO [M:0;c85114ed5096:39239 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/.lastflushedseqids 2024-11-22T03:48:30,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741905_1091 (size=130) 2024-11-22T03:48:30,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741905_1091 (size=130) 2024-11-22T03:48:30,986 INFO [M:0;c85114ed5096:39239 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T03:48:30,986 INFO [M:0;c85114ed5096:39239 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T03:48:30,986 DEBUG [M:0;c85114ed5096:39239 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:48:30,986 INFO [M:0;c85114ed5096:39239 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:48:30,986 DEBUG [M:0;c85114ed5096:39239 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:48:30,986 DEBUG [M:0;c85114ed5096:39239 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:48:30,986 DEBUG [M:0;c85114ed5096:39239 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:48:30,986 INFO [M:0;c85114ed5096:39239 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-22T03:48:31,002 DEBUG [M:0;c85114ed5096:39239 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/54165e831a164fb297f7d6e6f5a66184 is 82, key is hbase:meta,,1/info:regioninfo/1732247268814/Put/seqid=0 2024-11-22T03:48:31,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741906_1092 (size=5672) 2024-11-22T03:48:31,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741906_1092 (size=5672) 2024-11-22T03:48:31,007 INFO [M:0;c85114ed5096:39239 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/54165e831a164fb297f7d6e6f5a66184 2024-11-22T03:48:31,027 DEBUG [M:0;c85114ed5096:39239 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5e1c07766cb54a368f0d18f6b3526890 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732247269830/Put/seqid=0 2024-11-22T03:48:31,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741907_1093 (size=6255) 2024-11-22T03:48:31,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741907_1093 (size=6255) 2024-11-22T03:48:31,033 INFO [M:0;c85114ed5096:39239 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5e1c07766cb54a368f0d18f6b3526890 2024-11-22T03:48:31,039 INFO [M:0;c85114ed5096:39239 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5e1c07766cb54a368f0d18f6b3526890 2024-11-22T03:48:31,058 DEBUG [M:0;c85114ed5096:39239 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/383298d65f784a9d987101703c6f5f20 is 69, key is c85114ed5096,35197,1732247267957/rs:state/1732247268193/Put/seqid=0 2024-11-22T03:48:31,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741908_1094 (size=5224) 2024-11-22T03:48:31,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741908_1094 (size=5224) 2024-11-22T03:48:31,064 INFO [M:0;c85114ed5096:39239 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), 
to=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/383298d65f784a9d987101703c6f5f20 2024-11-22T03:48:31,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:48:31,076 INFO [RS:0;c85114ed5096:35197 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:48:31,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35197-0x100658b06a30001, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:48:31,076 INFO [RS:0;c85114ed5096:35197 {}] regionserver.HRegionServer(1031): Exiting; stopping=c85114ed5096,35197,1732247267957; zookeeper connection closed. 2024-11-22T03:48:31,077 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4bdfe20d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4bdfe20d 2024-11-22T03:48:31,077 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-22T03:48:31,082 DEBUG [M:0;c85114ed5096:39239 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/faa8b2211bb44c4faf34e34378663695 is 52, key is load_balancer_on/state:d/1732247268890/Put/seqid=0 2024-11-22T03:48:31,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741909_1095 (size=5056) 2024-11-22T03:48:31,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741909_1095 (size=5056) 2024-11-22T03:48:31,088 INFO [M:0;c85114ed5096:39239 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/faa8b2211bb44c4faf34e34378663695 2024-11-22T03:48:31,094 DEBUG [M:0;c85114ed5096:39239 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/54165e831a164fb297f7d6e6f5a66184 as hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/54165e831a164fb297f7d6e6f5a66184 2024-11-22T03:48:31,099 INFO [M:0;c85114ed5096:39239 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/54165e831a164fb297f7d6e6f5a66184, entries=8, sequenceid=60, filesize=5.5 K 2024-11-22T03:48:31,100 DEBUG [M:0;c85114ed5096:39239 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5e1c07766cb54a368f0d18f6b3526890 as 
hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5e1c07766cb54a368f0d18f6b3526890 2024-11-22T03:48:31,106 INFO [M:0;c85114ed5096:39239 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5e1c07766cb54a368f0d18f6b3526890 2024-11-22T03:48:31,107 INFO [M:0;c85114ed5096:39239 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5e1c07766cb54a368f0d18f6b3526890, entries=6, sequenceid=60, filesize=6.1 K 2024-11-22T03:48:31,108 DEBUG [M:0;c85114ed5096:39239 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/383298d65f784a9d987101703c6f5f20 as hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/383298d65f784a9d987101703c6f5f20 2024-11-22T03:48:31,114 INFO [M:0;c85114ed5096:39239 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/383298d65f784a9d987101703c6f5f20, entries=2, sequenceid=60, filesize=5.1 K 2024-11-22T03:48:31,115 DEBUG [M:0;c85114ed5096:39239 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/faa8b2211bb44c4faf34e34378663695 as hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/faa8b2211bb44c4faf34e34378663695 2024-11-22T03:48:31,121 INFO [M:0;c85114ed5096:39239 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/faa8b2211bb44c4faf34e34378663695, entries=1, sequenceid=60, filesize=4.9 K 2024-11-22T03:48:31,122 INFO [M:0;c85114ed5096:39239 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 136ms, sequenceid=60, compaction requested=false 2024-11-22T03:48:31,123 INFO [M:0;c85114ed5096:39239 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:48:31,124 DEBUG [M:0;c85114ed5096:39239 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732247310986Disabling compacts and flushes for region at 1732247310986Disabling writes for close at 1732247310986Obtaining lock to block concurrent updates at 1732247310986Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732247310986Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1732247310987 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732247310987Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732247310987Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732247311002 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732247311002Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732247311012 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732247311026 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732247311026Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732247311039 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732247311057 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732247311057Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732247311069 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732247311082 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732247311082Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76065521: reopening flushed file at 1732247311093 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@160061cd: reopening flushed file at 1732247311099 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@692102e0: reopening flushed file at 1732247311107 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@555a9c39: reopening flushed file at 1732247311114 (+7 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 136ms, sequenceid=60, compaction requested=false at 1732247311122 (+8 ms)Writing region close event to WAL at 1732247311123 (+1 ms)Closed at 1732247311123 2024-11-22T03:48:31,124 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:31,124 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:31,124 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:31,124 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:31,124 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:31,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42385 is added to blk_1073741891_1074 (size=1045) 2024-11-22T03:48:31,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33491 is added to blk_1073741891_1074 (size=1045) 2024-11-22T03:48:31,341 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:48:31,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:31,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:31,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:31,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:31,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:31,369 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:31,369 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:31,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:31,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:48:31,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:32,089 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@48f1e2bf {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1396528010-172.17.0.2-1732247267310:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:34635,null,null]) java.net.ConnectException: Call From c85114ed5096/172.17.0.2 to localhost:34719 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-22T03:48:32,158 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/WALs/c85114ed5096,39239,1732247267914/c85114ed5096%2C39239%2C1732247267914.1732247268051 to hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/oldWALs/c85114ed5096%2C39239%2C1732247267914.1732247268051 2024-11-22T03:48:32,166 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/MasterData/oldWALs/c85114ed5096%2C39239%2C1732247267914.1732247268051 to hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/oldWALs/c85114ed5096%2C39239%2C1732247267914.1732247268051$masterlocalwal$ 2024-11-22T03:48:32,167 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
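Editor's note on the repeated FsDatasetImpl(779) warnings earlier in the shutdown ("Exception : Cannot invoke \"java.util.Map.values()\" because \"this.executors\" is null"): these are ordinary NullPointerExceptions from the metrics thread polling a field that has already been nulled during datanode teardown; the detailed wording is the JDK's helpful-NPE message (the run is on Java 17 per the thread report further below). A minimal, self-contained sketch that reproduces the same message shape — class and field names here are illustrative, not Hadoop's:

    import java.util.HashMap;
    import java.util.Map;

    public class HelpfulNpeDemo {
        // Mirrors a metrics source whose executor map is cleared to null on shutdown.
        private Map<String, Object> executors = new HashMap<>();

        void shutdown() { executors = null; }

        int collect() {
            // Once shutdown() has run, this throws a NullPointerException whose message
            // reads: Cannot invoke "java.util.Map.values()" because "this.executors" is null
            // (exact wording may vary slightly by JDK release).
            return executors.values().size();
        }

        public static void main(String[] args) {
            HelpfulNpeDemo demo = new HelpfulNpeDemo();
            demo.shutdown();
            try {
                demo.collect();
            } catch (NullPointerException e) {
                System.out.println(e.getMessage());
            }
        }
    }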
2024-11-22T03:48:32,167 INFO [M:0;c85114ed5096:39239 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T03:48:32,167 INFO [M:0;c85114ed5096:39239 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39239 2024-11-22T03:48:32,167 INFO [M:0;c85114ed5096:39239 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:48:32,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:48:32,270 INFO [M:0;c85114ed5096:39239 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:48:32,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39239-0x100658b06a30000, quorum=127.0.0.1:57058, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:48:32,277 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@90b14a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:48:32,278 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@f8e3d4c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:48:32,278 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:48:32,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@664f3a1c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:48:32,279 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d479cab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir/,STOPPED} 2024-11-22T03:48:32,281 WARN [BP-1396528010-172.17.0.2-1732247267310 heartbeating to localhost/127.0.0.1:43749 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:48:32,281 WARN [BP-1396528010-172.17.0.2-1732247267310 heartbeating to localhost/127.0.0.1:43749 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1396528010-172.17.0.2-1732247267310 (Datanode Uuid 4f278f4c-34ba-4fc6-8845-7d0a79f5af99) service to localhost/127.0.0.1:43749 2024-11-22T03:48:32,281 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c65fa8e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1396528010-172.17.0.2-1732247267310:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:34635,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:34719 , LocalHost:localPort c85114ed5096/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-22T03:48:32,281 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c65fa8e {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1396528010-172.17.0.2-1732247267310:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:33491,null,null], DatanodeInfoWithStorage[127.0.0.1:34635,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1396528010-172.17.0.2-1732247267310 2024-11-22T03:48:32,281 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c65fa8e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1396528010-172.17.0.2-1732247267310:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:33491,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1396528010-172.17.0.2-1732247267310 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:32,281 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c65fa8e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1396528010-172.17.0.2-1732247267310:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:34635,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1396528010-172.17.0.2-1732247267310 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:48:32,281 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c65fa8e {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1396528010-172.17.0.2-1732247267310:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:33491,null,null], DatanodeInfoWithStorage[127.0.0.1:34635,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1396528010-172.17.0.2-1732247267310:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:33491,null,null], DatanodeInfoWithStorage[127.0.0.1:34635,null,null]] 2024-11-22T03:48:32,282 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data3/current/BP-1396528010-172.17.0.2-1732247267310 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:48:32,282 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T03:48:32,282 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:48:32,282 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data4/current/BP-1396528010-172.17.0.2-1732247267310 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:48:32,282 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:48:32,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1fdaf9dc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:48:32,284 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a4a97e4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:48:32,284 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:48:32,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e536a4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:48:32,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f3178e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir/,STOPPED} 2024-11-22T03:48:32,285 WARN [BP-1396528010-172.17.0.2-1732247267310 heartbeating to localhost/127.0.0.1:43749 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:48:32,286 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
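Editor's note on the ResourceChecker report that follows ("after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78)" with its "Potentially hanging thread" entries): the raw material for such a report — every live thread plus its current stack — can be gathered from any JVM with Thread.getAllStackTraces(). A small generic sketch is below; it is not HBase's ResourceChecker implementation, just an illustration of where dumps like the one after this note come from.

    import java.util.Map;

    public final class ThreadDumpSketch {
        public static void main(String[] args) {
            // Snapshot of all live threads and their current stack traces, comparable in
            // shape to the "Potentially hanging thread" entries printed by the test.
            Map<Thread, StackTraceElement[]> traces = Thread.getAllStackTraces();
            System.out.println("Live threads: " + traces.size());
            traces.forEach((thread, frames) -> {
                System.out.println("Potentially hanging thread: " + thread.getName());
                for (StackTraceElement frame : frames) {
                    System.out.println("    " + frame);
                }
            });
        }
    }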
2024-11-22T03:48:32,286 WARN [BP-1396528010-172.17.0.2-1732247267310 heartbeating to localhost/127.0.0.1:43749 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1396528010-172.17.0.2-1732247267310 (Datanode Uuid f8752521-28ed-48c6-b362-b2f1a86d4921) service to localhost/127.0.0.1:43749 2024-11-22T03:48:32,286 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:48:32,286 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data5/current/BP-1396528010-172.17.0.2-1732247267310 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:48:32,286 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/cluster_c8679232-e73d-f1ae-a114-3687747a3819/data/data6/current/BP-1396528010-172.17.0.2-1732247267310 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:48:32,287 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:48:32,292 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1283c476{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:48:32,292 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@74c4bb5f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:48:32,292 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:48:32,292 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bc4af61{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:48:32,293 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f4b9244{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir/,STOPPED} 2024-11-22T03:48:32,299 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T03:48:32,328 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T03:48:32,335 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43749 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43749 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43749 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:43749 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43749 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f0254bf55b8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37489 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f0254bf55b8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:43749 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:43749 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:43749 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:37489 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:43749 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43749 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:43749 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=448 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=81 (was 146), ProcessCount=11 (was 11), AvailableMemoryMB=2929 (was 3782) 2024-11-22T03:48:32,342 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=81, ProcessCount=11, AvailableMemoryMB=2929 2024-11-22T03:48:32,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T03:48:32,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.log.dir so I do NOT create it in target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75 2024-11-22T03:48:32,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2fa3c0a0-4b0b-53e7-4337-f7320844139a/hadoop.tmp.dir so I do NOT create it in target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75 2024-11-22T03:48:32,342 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f, deleteOnExit=true 2024-11-22T03:48:32,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T03:48:32,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/test.cache.data in system properties and HBase conf 2024-11-22T03:48:32,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T03:48:32,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir in system properties and 
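The "Thread LEAK?" / "OpenFileDescriptor LEAK?" figures above are the ResourceChecker's before/after comparison for the previous test, and the "before:" line re-captures the same counters for testLogRollOnPipelineRestart. As a rough sketch of that idea only (the class ResourceSnapshot below is invented for illustration and is not the HBase ResourceChecker; only the JDK management-bean calls are real APIs), the same counters can be read through standard JMX beans:

import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import com.sun.management.UnixOperatingSystemMXBean;

// Hypothetical before/after resource check in the spirit of the summary above.
public final class ResourceSnapshot {
    private final int threads;
    private final long openFds;

    private ResourceSnapshot(int threads, long openFds) {
        this.threads = threads;
        this.openFds = openFds;
    }

    // Capture current live-thread and open-file-descriptor counts via JMX.
    public static ResourceSnapshot take() {
        int threads = ManagementFactory.getThreadMXBean().getThreadCount();
        long fds = -1L; // file-descriptor count is only exposed on Unix-like platforms
        OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
        if (os instanceof UnixOperatingSystemMXBean) {
            fds = ((UnixOperatingSystemMXBean) os).getOpenFileDescriptorCount();
        }
        return new ResourceSnapshot(threads, fds);
    }

    // Print a summary like "Thread=154 (was 142)" and flag suspicious growth.
    public void compareWith(ResourceSnapshot before, int threadSlack, long fdSlack) {
        System.out.printf("Thread=%d (was %d), OpenFileDescriptor=%d (was %d)%n",
            threads, before.threads, openFds, before.openFds);
        if (threads > before.threads + threadSlack) {
            System.out.println("Thread LEAK?");
        }
        if (openFds >= 0 && before.openFds >= 0 && openFds > before.openFds + fdSlack) {
            System.out.println("OpenFileDescriptor LEAK?");
        }
    }
}

A harness built this way would take() a snapshot before starting the mini cluster and compare another one after shutdown, mirroring the "(was ...)" values printed above.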
HBase conf 2024-11-22T03:48:32,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T03:48:32,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T03:48:32,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T03:48:32,343 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-22T03:48:32,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:48:32,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:48:32,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T03:48:32,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:48:32,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T03:48:32,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T03:48:32,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:48:32,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/dfs.journalnode.edits.dir in system properties 
and HBase conf 2024-11-22T03:48:32,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T03:48:32,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/nfs.dump.dir in system properties and HBase conf 2024-11-22T03:48:32,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/java.io.tmpdir in system properties and HBase conf 2024-11-22T03:48:32,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:48:32,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T03:48:32,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T03:48:32,356 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:48:32,403 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:48:32,408 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:48:32,409 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:48:32,409 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:48:32,409 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:48:32,409 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:48:32,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@420b80b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:48:32,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e21b500{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:48:32,505 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@584b55a0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/java.io.tmpdir/jetty-localhost-36509-hadoop-hdfs-3_4_1-tests_jar-_-any-16038890339085750420/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:48:32,506 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1afc855{HTTP/1.1, (http/1.1)}{localhost:36509} 2024-11-22T03:48:32,506 INFO [Time-limited test {}] server.Server(415): Started @149833ms 2024-11-22T03:48:32,518 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:48:32,565 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:48:32,568 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:48:32,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:48:32,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:48:32,569 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:48:32,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3cc9fb65{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:48:32,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@512c947f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:48:32,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@265efa03{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/java.io.tmpdir/jetty-localhost-35767-hadoop-hdfs-3_4_1-tests_jar-_-any-5478793379801082276/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:48:32,672 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34714045{HTTP/1.1, (http/1.1)}{localhost:35767} 2024-11-22T03:48:32,673 INFO [Time-limited test {}] server.Server(415): Started @149999ms 2024-11-22T03:48:32,674 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:48:32,706 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:48:32,709 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:48:32,710 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:48:32,710 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:48:32,710 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:48:32,711 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@585ec7cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:48:32,711 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4da18e2b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:48:32,736 WARN [Thread-1188 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data1/current/BP-894353976-172.17.0.2-1732247312367/current, will proceed with Du for space computation calculation, 2024-11-22T03:48:32,736 WARN [Thread-1189 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data2/current/BP-894353976-172.17.0.2-1732247312367/current, will proceed with Du for space computation calculation, 2024-11-22T03:48:32,756 WARN [Thread-1167 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:48:32,759 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x12ee60792e44807f with lease ID 0x5982d97bd8242428: Processing first storage report for DS-617819d4-916b-4576-9dac-8f869afd216e from datanode DatanodeRegistration(127.0.0.1:33109, datanodeUuid=9db1947b-5ca6-458d-a2e7-c7d00008effd, infoPort=41009, infoSecurePort=0, ipcPort=35967, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367) 2024-11-22T03:48:32,759 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x12ee60792e44807f with lease ID 0x5982d97bd8242428: from storage DS-617819d4-916b-4576-9dac-8f869afd216e node DatanodeRegistration(127.0.0.1:33109, datanodeUuid=9db1947b-5ca6-458d-a2e7-c7d00008effd, infoPort=41009, infoSecurePort=0, ipcPort=35967, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:48:32,759 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x12ee60792e44807f with lease ID 0x5982d97bd8242428: Processing first storage report for DS-86215298-ed51-4f3a-88ce-ab012dccf361 from datanode DatanodeRegistration(127.0.0.1:33109, datanodeUuid=9db1947b-5ca6-458d-a2e7-c7d00008effd, infoPort=41009, infoSecurePort=0, ipcPort=35967, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367) 2024-11-22T03:48:32,759 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x12ee60792e44807f with lease ID 0x5982d97bd8242428: from storage DS-86215298-ed51-4f3a-88ce-ab012dccf361 node DatanodeRegistration(127.0.0.1:33109, datanodeUuid=9db1947b-5ca6-458d-a2e7-c7d00008effd, infoPort=41009, infoSecurePort=0, ipcPort=35967, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:48:32,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:32,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:48:32,806 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5151c3af{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/java.io.tmpdir/jetty-localhost-36391-hadoop-hdfs-3_4_1-tests_jar-_-any-15668050569740568932/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:48:32,806 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@9952a37{HTTP/1.1, (http/1.1)}{localhost:36391} 2024-11-22T03:48:32,806 INFO [Time-limited test {}] server.Server(415): Started @150133ms 2024-11-22T03:48:32,807 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:48:32,863 WARN [Thread-1214 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data3/current/BP-894353976-172.17.0.2-1732247312367/current, will proceed with Du for space computation calculation, 2024-11-22T03:48:32,863 WARN [Thread-1215 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data4/current/BP-894353976-172.17.0.2-1732247312367/current, will proceed with Du for space computation calculation, 2024-11-22T03:48:32,884 WARN [Thread-1203 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
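The two "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" entries above show Close-WAL-Writer-0 probing DistributedFileSystem.isFileClosed through Method.invoke while the mini cluster's DFS client is already shut down; the reflective call wraps the real IOException in an InvocationTargetException, which is why the top-level message reads "InvocationTargetException: null". The helper below is only a schematic of that probe-and-unwrap pattern (IsFileClosedProbe is an invented name, not the RecoverLeaseFSUtils implementation):

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Schematic probe: call isFileClosed(Path) reflectively if the FileSystem offers it,
// and unwrap the InvocationTargetException that reflection adds around the real failure.
final class IsFileClosedProbe {
    private IsFileClosedProbe() {
    }

    static boolean isFileClosed(FileSystem fs, Path wal) throws IOException {
        Method isFileClosed;
        try {
            isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
        } catch (NoSuchMethodException e) {
            return false; // this FileSystem implementation cannot answer the question
        }
        try {
            return (Boolean) isFileClosed.invoke(fs, wal);
        } catch (InvocationTargetException e) {
            Throwable cause = e.getCause();
            if (cause instanceof IOException) {
                throw (IOException) cause; // surface the underlying failure, e.g. "Filesystem closed"
            }
            throw new IOException("isFileClosed probe failed", cause);
        } catch (IllegalAccessException e) {
            throw new IOException(e);
        }
    }
}

That pending lease recovery is also why Close-WAL-Writer-0 appears in the hanging-thread dump earlier, sleeping inside RecoverLeaseFSUtils.recoverDFSFileLease.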
Assuming default value of -1 2024-11-22T03:48:32,886 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf0cfcfc5bfb4dcdf with lease ID 0x5982d97bd8242429: Processing first storage report for DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822 from datanode DatanodeRegistration(127.0.0.1:42569, datanodeUuid=8f5b92ec-9a64-4f50-bf34-61b6438278c4, infoPort=45023, infoSecurePort=0, ipcPort=36291, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367) 2024-11-22T03:48:32,886 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf0cfcfc5bfb4dcdf with lease ID 0x5982d97bd8242429: from storage DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822 node DatanodeRegistration(127.0.0.1:42569, datanodeUuid=8f5b92ec-9a64-4f50-bf34-61b6438278c4, infoPort=45023, infoSecurePort=0, ipcPort=36291, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:48:32,886 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf0cfcfc5bfb4dcdf with lease ID 0x5982d97bd8242429: Processing first storage report for DS-130457d9-dfff-42bc-9e58-276a248b05b0 from datanode DatanodeRegistration(127.0.0.1:42569, datanodeUuid=8f5b92ec-9a64-4f50-bf34-61b6438278c4, infoPort=45023, infoSecurePort=0, ipcPort=36291, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367) 2024-11-22T03:48:32,886 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf0cfcfc5bfb4dcdf with lease ID 0x5982d97bd8242429: from storage DS-130457d9-dfff-42bc-9e58-276a248b05b0 node DatanodeRegistration(127.0.0.1:42569, datanodeUuid=8f5b92ec-9a64-4f50-bf34-61b6438278c4, infoPort=45023, infoSecurePort=0, ipcPort=36291, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:48:32,931 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75 2024-11-22T03:48:32,933 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/zookeeper_0, clientPort=53951, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T03:48:32,934 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53951 2024-11-22T03:48:32,934 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:48:32,936 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:48:32,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33109 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:48:32,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42569 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:48:32,945 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394 with version=8 2024-11-22T03:48:32,945 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/hbase-staging 2024-11-22T03:48:32,947 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c85114ed5096:0 server-side Connection retries=45 2024-11-22T03:48:32,947 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:48:32,947 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:48:32,947 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:48:32,947 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:48:32,947 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:48:32,947 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T03:48:32,947 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:48:32,948 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33229 2024-11-22T03:48:32,949 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33229 connecting to ZooKeeper ensemble=127.0.0.1:53951 2024-11-22T03:48:32,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:332290x0, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:48:32,953 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33229-0x100658bb6930000 connected 2024-11-22T03:48:32,966 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:48:32,967 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:48:32,970 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:48:32,970 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394, hbase.cluster.distributed=false 2024-11-22T03:48:32,971 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:48:32,972 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33229 2024-11-22T03:48:32,973 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33229 2024-11-22T03:48:32,973 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33229 2024-11-22T03:48:32,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33229 2024-11-22T03:48:32,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33229 2024-11-22T03:48:32,989 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c85114ed5096:0 server-side Connection retries=45 2024-11-22T03:48:32,989 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:48:32,989 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:48:32,989 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:48:32,989 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:48:32,989 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:48:32,989 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:48:32,989 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:48:32,991 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38975 2024-11-22T03:48:32,992 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38975 connecting to ZooKeeper ensemble=127.0.0.1:53951 2024-11-22T03:48:32,993 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:48:32,994 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:48:32,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:389750x0, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:48:32,998 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:389750x0, quorum=127.0.0.1:53951, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:48:32,998 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38975-0x100658bb6930001 connected 2024-11-22T03:48:32,998 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:48:32,999 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T03:48:33,000 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T03:48:33,001 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:48:33,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38975 2024-11-22T03:48:33,003 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38975 2024-11-22T03:48:33,005 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38975 2024-11-22T03:48:33,006 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38975 2024-11-22T03:48:33,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38975 2024-11-22T03:48:33,019 DEBUG [M:0;c85114ed5096:33229 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c85114ed5096:33229 2024-11-22T03:48:33,019 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c85114ed5096,33229,1732247312946 2024-11-22T03:48:33,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:48:33,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:48:33,021 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/c85114ed5096,33229,1732247312946 2024-11-22T03:48:33,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T03:48:33,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:33,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:33,022 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:48:33,023 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c85114ed5096,33229,1732247312946 from backup master directory 2024-11-22T03:48:33,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c85114ed5096,33229,1732247312946 2024-11-22T03:48:33,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:48:33,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:48:33,023 WARN [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
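The entries above show the master (port 33229) and the region server (port 38975) coming up with deliberately small RPC executors (handlerCount=3, maxQueueLength=30) against the single-node ZooKeeper ensemble at 127.0.0.1:53951. The sketch below is a hypothetical illustration, not this harness's actual configuration, of the Configuration keys that commonly drive those values; the class name and the literal values are assumptions chosen only to mirror the numbers logged above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical helper; shows which keys map to the RPC/ZK settings seen in the log.
public final class MiniClusterRpcConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // ZooKeeper ensemble the master and region server connect to.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    // Small handler pool, comparable to the handlerCount=3 logged above.
    conf.setInt("hbase.regionserver.handler.count", 3);
    // Cap on queued calls per call queue (maxQueueLength=30 above).
    conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
    // Single-process deployment, as logged by HMaster (hbase.cluster.distributed=false).
    conf.setBoolean("hbase.cluster.distributed", false);
    return conf;
  }

  private MiniClusterRpcConfigSketch() {
  }
}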
2024-11-22T03:48:33,023 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c85114ed5096,33229,1732247312946 2024-11-22T03:48:33,027 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/hbase.id] with ID: deb187fc-4c31-426f-a815-e9992112983d 2024-11-22T03:48:33,027 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/.tmp/hbase.id 2024-11-22T03:48:33,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33109 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:48:33,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42569 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:48:33,033 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/.tmp/hbase.id]:[hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/hbase.id] 2024-11-22T03:48:33,044 INFO [master/c85114ed5096:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:48:33,044 INFO [master/c85114ed5096:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T03:48:33,046 INFO [master/c85114ed5096:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
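The bootstrap sequence above (version file, hbase.id cluster ID, empty table descriptor cache) is what a mini-cluster start produces. Below is a minimal sketch of driving it from test code, assuming the HBaseTestingUtil API referenced in the log; the class and the start/shutdown methods are the public testing-util entry points, while the surrounding scaffold is illustrative.

import org.apache.hadoop.hbase.HBaseTestingUtil;

// Illustrative test scaffold; the try/finally shape is an assumption, not taken from this run.
public class MiniClusterBootstrapSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Starts DFS, ZooKeeper, one master and one region server, producing logs like the above.
    util.startMiniCluster();
    try {
      // Test logic would run against the mini cluster here.
    } finally {
      util.shutdownMiniCluster();
    }
  }
}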
2024-11-22T03:48:33,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:33,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:33,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33109 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:48:33,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42569 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:48:33,055 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:48:33,056 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T03:48:33,056 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:48:33,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42569 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:48:33,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33109 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:48:33,066 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store 2024-11-22T03:48:33,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33109 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:48:33,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42569 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:48:33,475 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:48:33,475 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:48:33,475 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:48:33,475 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:48:33,475 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:48:33,475 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:48:33,475 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
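The 'master:store' descriptor logged above spells out its column families (info, proc, rs, state) with per-family versions, bloom filters, block sizes and encodings. For comparison, here is a hedged sketch of how an equivalent descriptor would be written with the public TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API; the table name 'example_store' and the trimmed-down family list are assumptions for illustration only.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical builder mirroring the 'info' and 'proc' families from the logged descriptor.
public final class StoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                    // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
            .build())
        .build();
  }

  private StoreDescriptorSketch() {
  }
}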
2024-11-22T03:48:33,475 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732247313475Disabling compacts and flushes for region at 1732247313475Disabling writes for close at 1732247313475Writing region close event to WAL at 1732247313475Closed at 1732247313475 2024-11-22T03:48:33,478 WARN [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/.initializing 2024-11-22T03:48:33,478 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/WALs/c85114ed5096,33229,1732247312946 2024-11-22T03:48:33,483 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C33229%2C1732247312946, suffix=, logDir=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/WALs/c85114ed5096,33229,1732247312946, archiveDir=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/oldWALs, maxLogs=10 2024-11-22T03:48:33,484 INFO [master/c85114ed5096:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C33229%2C1732247312946.1732247313484 2024-11-22T03:48:33,489 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/WALs/c85114ed5096,33229,1732247312946/c85114ed5096%2C33229%2C1732247312946.1732247313484 2024-11-22T03:48:33,490 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41009:41009),(127.0.0.1/127.0.0.1:45023:45023)] 2024-11-22T03:48:33,490 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:48:33,491 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:48:33,491 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:48:33,491 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:48:33,492 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:48:33,493 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T03:48:33,493 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:48:33,494 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:48:33,494 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:48:33,495 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T03:48:33,495 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:48:33,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:48:33,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:48:33,498 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T03:48:33,498 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:48:33,498 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:48:33,499 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:48:33,500 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T03:48:33,500 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:48:33,501 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:48:33,501 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:48:33,502 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:48:33,502 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:48:33,504 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:48:33,504 DEBUG [master/c85114ed5096:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:48:33,504 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T03:48:33,505 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:48:33,507 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:48:33,508 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=880403, jitterRate=0.1194903552532196}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T03:48:33,509 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732247313491Initializing all the Stores at 1732247313492 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247313492Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247313492Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247313492Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247313492Cleaning up temporary data from old regions at 1732247313504 (+12 ms)Region opened successfully at 1732247313508 (+4 ms) 2024-11-22T03:48:33,509 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T03:48:33,512 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cf9410b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c85114ed5096/172.17.0.2:0 2024-11-22T03:48:33,513 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T03:48:33,513 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T03:48:33,513 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T03:48:33,513 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T03:48:33,514 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T03:48:33,514 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T03:48:33,514 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T03:48:33,516 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T03:48:33,517 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T03:48:33,518 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T03:48:33,519 INFO [master/c85114ed5096:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T03:48:33,519 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T03:48:33,520 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T03:48:33,520 INFO [master/c85114ed5096:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T03:48:33,521 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T03:48:33,522 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T03:48:33,523 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T03:48:33,524 DEBUG 
[master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T03:48:33,526 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T03:48:33,527 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T03:48:33,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:48:33,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:48:33,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:33,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:33,528 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c85114ed5096,33229,1732247312946, sessionid=0x100658bb6930000, setting cluster-up flag (Was=false) 2024-11-22T03:48:33,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:33,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:33,533 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T03:48:33,534 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c85114ed5096,33229,1732247312946 2024-11-22T03:48:33,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:33,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:33,539 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T03:48:33,539 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c85114ed5096,33229,1732247312946 2024-11-22T03:48:33,541 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T03:48:33,542 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T03:48:33,542 INFO [master/c85114ed5096:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T03:48:33,543 INFO [master/c85114ed5096:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T03:48:33,543 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c85114ed5096,33229,1732247312946 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T03:48:33,544 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:48:33,544 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:48:33,544 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:48:33,544 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:48:33,544 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c85114ed5096:0, corePoolSize=10, maxPoolSize=10 2024-11-22T03:48:33,544 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:48:33,544 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c85114ed5096:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:48:33,544 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c85114ed5096:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T03:48:33,546 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:48:33,546 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T03:48:33,547 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:48:33,547 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T03:48:33,550 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732247343550 2024-11-22T03:48:33,551 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T03:48:33,551 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T03:48:33,551 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T03:48:33,551 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T03:48:33,551 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T03:48:33,551 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T03:48:33,553 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:33,553 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T03:48:33,553 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T03:48:33,553 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T03:48:33,554 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T03:48:33,554 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T03:48:33,555 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247313554,5,FailOnTimeoutGroup] 2024-11-22T03:48:33,555 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247313555,5,FailOnTimeoutGroup] 2024-11-22T03:48:33,555 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:33,555 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T03:48:33,555 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:33,555 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
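LogsCleaner, HFileCleaner, ReplicationBarrierCleaner and SnapshotCleaner are all registered through ChoreService as ScheduledChore instances with the periods shown above. The following is a minimal sketch of that scheduling pattern using the public ScheduledChore/ChoreService API; the chore name, the no-op body and the Stoppable stub are assumptions, not code from this test.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Illustrative chore wiring; the real cleaners plug their cleanup logic into chore().
public class ChoreSchedulingSketch {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("sketch");
    ScheduledChore cleaner = new ScheduledChore("ExampleCleaner", stopper, 600_000) {
      @Override protected void chore() {
        // Periodic cleanup work would run here every 10 minutes (period=600000 ms, as logged).
      }
    };
    service.scheduleChore(cleaner);
    // ... later, on shutdown:
    service.shutdown();
  }
}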
2024-11-22T03:48:33,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33109 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:48:33,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42569 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:48:33,557 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T03:48:33,557 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394 2024-11-22T03:48:33,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42569 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:48:33,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33109 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:48:33,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:48:33,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:48:33,566 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:48:33,566 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:48:33,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:48:33,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:48:33,568 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:48:33,568 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:48:33,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:48:33,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:48:33,569 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:48:33,569 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:48:33,570 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:48:33,570 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:48:33,571 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:48:33,571 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:48:33,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:48:33,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:48:33,572 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740 2024-11-22T03:48:33,572 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740 2024-11-22T03:48:33,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:48:33,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:48:33,574 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
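The CompactionConfiguration lines above repeat the same knobs for every store: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with 0.5 jitter. Below is a hedged sketch of the configuration keys that back those values; the numbers are simply the defaults echoed in the log, and the helper class is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical helper listing the keys behind the logged CompactionConfiguration values.
public final class CompactionConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);  // major period (7 days)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);  // major jitter
    return conf;
  }

  private CompactionConfigSketch() {
  }
}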
2024-11-22T03:48:33,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:48:33,578 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:48:33,579 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=729010, jitterRate=-0.07301616668701172}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:48:33,579 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732247313564Initializing all the Stores at 1732247313565 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247313565Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247313565Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247313565Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247313565Cleaning up temporary data from old regions at 1732247313574 (+9 ms)Region opened successfully at 1732247313579 (+5 ms) 2024-11-22T03:48:33,579 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:48:33,580 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:48:33,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:48:33,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:48:33,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:48:33,580 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:48:33,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732247313579Disabling compacts and flushes for region at 1732247313579Disabling writes for close at 1732247313580 (+1 ms)Writing 
region close event to WAL at 1732247313580Closed at 1732247313580 2024-11-22T03:48:33,582 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:48:33,582 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T03:48:33,582 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T03:48:33,583 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:48:33,584 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T03:48:33,611 INFO [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(746): ClusterId : deb187fc-4c31-426f-a815-e9992112983d 2024-11-22T03:48:33,612 DEBUG [RS:0;c85114ed5096:38975 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:48:33,613 DEBUG [RS:0;c85114ed5096:38975 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:48:33,613 DEBUG [RS:0;c85114ed5096:38975 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:48:33,615 DEBUG [RS:0;c85114ed5096:38975 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:48:33,615 DEBUG [RS:0;c85114ed5096:38975 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@514c9302, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c85114ed5096/172.17.0.2:0 2024-11-22T03:48:33,626 DEBUG [RS:0;c85114ed5096:38975 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c85114ed5096:38975 2024-11-22T03:48:33,626 INFO [RS:0;c85114ed5096:38975 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:48:33,626 INFO [RS:0;c85114ed5096:38975 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:48:33,626 DEBUG [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(832): About to register with Master. 
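[editor's note] The "Opened 1588230740" entry a little further up reports the split-policy chain for meta: SteppingSplitPolicy wrapping IncreasingToUpperBoundRegionSplitPolicy and ConstantSizeRegionSplitPolicy with desiredMaxFileSize=729010 at jitterRate=-0.073, which is consistent with a base file size of 786432 bytes (the same value that later triggers the MAX_FILESIZE warning). A rough sketch, assuming the usual key names, of the two settings behind those numbers:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SplitPolicySketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // split policy class (SteppingSplitPolicy is the stock default)
        conf.set("hbase.regionserver.region.split.policy",
            "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
        // base region size before jitter; deliberately tiny in this test run
        conf.setLong("hbase.hregion.max.filesize", 786432L);
      }
    }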
2024-11-22T03:48:33,627 INFO [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(2659): reportForDuty to master=c85114ed5096,33229,1732247312946 with port=38975, startcode=1732247312988 2024-11-22T03:48:33,627 DEBUG [RS:0;c85114ed5096:38975 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:48:33,629 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42609, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:48:33,629 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33229 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c85114ed5096,38975,1732247312988 2024-11-22T03:48:33,629 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33229 {}] master.ServerManager(517): Registering regionserver=c85114ed5096,38975,1732247312988 2024-11-22T03:48:33,631 DEBUG [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394 2024-11-22T03:48:33,631 DEBUG [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33141 2024-11-22T03:48:33,631 DEBUG [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:48:33,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:48:33,633 DEBUG [RS:0;c85114ed5096:38975 {}] zookeeper.ZKUtil(111): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c85114ed5096,38975,1732247312988 2024-11-22T03:48:33,633 WARN [RS:0;c85114ed5096:38975 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:48:33,633 INFO [RS:0;c85114ed5096:38975 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:48:33,633 DEBUG [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988 2024-11-22T03:48:33,634 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c85114ed5096,38975,1732247312988] 2024-11-22T03:48:33,636 INFO [RS:0;c85114ed5096:38975 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:48:33,638 INFO [RS:0;c85114ed5096:38975 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:48:33,638 INFO [RS:0;c85114ed5096:38975 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:48:33,638 INFO [RS:0;c85114ed5096:38975 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
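[editor's note] The MemStoreFlusher(131) and PressureAwareCompactionThroughputController(131) entries above report globalMemStoreLimit=880 M with a low-water mark of 836 M (about 95% of the limit) and compaction throughput bounds of 100 MB/s upper, 50 MB/s lower. A minimal sketch, assuming the usual key names; note the memstore keys are heap fractions, so the absolute 880 M / 836 M figures depend on the JVM heap, and the fractions below are just the stock defaults:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // global memstore limit as a fraction of heap, and the low-water mark
        // as a fraction of that limit (836 M is ~95% of 880 M above)
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // compaction throughput bounds logged as 100 MB/s upper, 50 MB/s lower
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
      }
    }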
2024-11-22T03:48:33,638 INFO [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:48:33,639 INFO [RS:0;c85114ed5096:38975 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:48:33,639 INFO [RS:0;c85114ed5096:38975 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:33,639 DEBUG [RS:0;c85114ed5096:38975 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:48:33,639 DEBUG [RS:0;c85114ed5096:38975 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:48:33,640 DEBUG [RS:0;c85114ed5096:38975 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:48:33,640 DEBUG [RS:0;c85114ed5096:38975 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:48:33,640 DEBUG [RS:0;c85114ed5096:38975 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:48:33,640 DEBUG [RS:0;c85114ed5096:38975 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c85114ed5096:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:48:33,640 DEBUG [RS:0;c85114ed5096:38975 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:48:33,640 DEBUG [RS:0;c85114ed5096:38975 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:48:33,640 DEBUG [RS:0;c85114ed5096:38975 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:48:33,640 DEBUG [RS:0;c85114ed5096:38975 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:48:33,640 DEBUG [RS:0;c85114ed5096:38975 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:48:33,640 DEBUG [RS:0;c85114ed5096:38975 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:48:33,640 DEBUG [RS:0;c85114ed5096:38975 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:48:33,640 DEBUG [RS:0;c85114ed5096:38975 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:48:33,641 INFO [RS:0;c85114ed5096:38975 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
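[editor's note] The ExecutorService(95) lines above start one bounded handler pool per region-server event type (RS_OPEN_REGION, RS_CLOSE_META, RS_SNAPSHOT_OPERATIONS, and so on), each with corePoolSize equal to maxPoolSize. The snippet below is not HBase's own executor code, only a generic JDK illustration of what a fixed-size pool of that shape looks like:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class FixedPoolSketch {
      public static void main(String[] args) {
        // corePoolSize == maxPoolSize, as in the RS_OPEN_REGION pool above
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        openRegionPool.submit(() -> System.out.println("open-region handler running"));
        openRegionPool.shutdown();
      }
    }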
2024-11-22T03:48:33,641 INFO [RS:0;c85114ed5096:38975 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:33,641 INFO [RS:0;c85114ed5096:38975 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:33,641 INFO [RS:0;c85114ed5096:38975 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:33,641 INFO [RS:0;c85114ed5096:38975 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:33,641 INFO [RS:0;c85114ed5096:38975 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,38975,1732247312988-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:48:33,654 INFO [RS:0;c85114ed5096:38975 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:48:33,655 INFO [RS:0;c85114ed5096:38975 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,38975,1732247312988-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:33,655 INFO [RS:0;c85114ed5096:38975 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:33,655 INFO [RS:0;c85114ed5096:38975 {}] regionserver.Replication(171): c85114ed5096,38975,1732247312988 started 2024-11-22T03:48:33,668 INFO [RS:0;c85114ed5096:38975 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:33,668 INFO [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(1482): Serving as c85114ed5096,38975,1732247312988, RpcServer on c85114ed5096/172.17.0.2:38975, sessionid=0x100658bb6930001 2024-11-22T03:48:33,668 DEBUG [RS:0;c85114ed5096:38975 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:48:33,668 DEBUG [RS:0;c85114ed5096:38975 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c85114ed5096,38975,1732247312988 2024-11-22T03:48:33,668 DEBUG [RS:0;c85114ed5096:38975 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,38975,1732247312988' 2024-11-22T03:48:33,668 DEBUG [RS:0;c85114ed5096:38975 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:48:33,669 DEBUG [RS:0;c85114ed5096:38975 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:48:33,669 DEBUG [RS:0;c85114ed5096:38975 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:48:33,669 DEBUG [RS:0;c85114ed5096:38975 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:48:33,669 DEBUG [RS:0;c85114ed5096:38975 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c85114ed5096,38975,1732247312988 2024-11-22T03:48:33,669 DEBUG [RS:0;c85114ed5096:38975 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,38975,1732247312988' 2024-11-22T03:48:33,669 DEBUG [RS:0;c85114ed5096:38975 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:48:33,670 DEBUG 
[RS:0;c85114ed5096:38975 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:48:33,670 DEBUG [RS:0;c85114ed5096:38975 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:48:33,670 INFO [RS:0;c85114ed5096:38975 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:48:33,670 INFO [RS:0;c85114ed5096:38975 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T03:48:33,735 WARN [c85114ed5096:33229 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T03:48:33,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:48:33,772 INFO [RS:0;c85114ed5096:38975 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C38975%2C1732247312988, suffix=, logDir=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988, archiveDir=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/oldWALs, maxLogs=32 2024-11-22T03:48:33,774 INFO [RS:0;c85114ed5096:38975 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C38975%2C1732247312988.1732247313773 2024-11-22T03:48:33,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:48:33,782 INFO [RS:0;c85114ed5096:38975 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247313773 2024-11-22T03:48:33,783 DEBUG [RS:0;c85114ed5096:38975 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45023:45023),(127.0.0.1/127.0.0.1:41009:41009)] 2024-11-22T03:48:33,985 DEBUG [c85114ed5096:33229 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T03:48:33,986 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c85114ed5096,38975,1732247312988 2024-11-22T03:48:33,989 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c85114ed5096,38975,1732247312988, state=OPENING 2024-11-22T03:48:33,991 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T03:48:33,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:33,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:33,995 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:48:33,995 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:48:33,995 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:48:33,995 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c85114ed5096,38975,1732247312988}] 2024-11-22T03:48:34,151 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T03:48:34,156 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38571, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T03:48:34,164 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T03:48:34,164 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:48:34,167 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C38975%2C1732247312988.meta, suffix=.meta, 
logDir=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988, archiveDir=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/oldWALs, maxLogs=32 2024-11-22T03:48:34,168 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C38975%2C1732247312988.meta.1732247314168.meta 2024-11-22T03:48:34,174 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.meta.1732247314168.meta 2024-11-22T03:48:34,175 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41009:41009),(127.0.0.1/127.0.0.1:45023:45023)] 2024-11-22T03:48:34,176 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:48:34,176 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T03:48:34,176 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T03:48:34,177 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
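[editor's note] Both wal.AbstractFSWAL(613) entries above (the region-server WAL and the .meta WAL) report blocksize=256 MB, rollsize=128 MB and maxLogs=32. A rough sketch, assuming the usual key names, of where those numbers come from; rollsize is blocksize multiplied by the roll multiplier:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size (256 MB) and roll multiplier (0.5 => rollsize 128 MB)
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // maximum number of WAL files kept before forcing flushes (maxLogs=32)
        conf.setInt("hbase.regionserver.maxlogs", 32);
      }
    }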
2024-11-22T03:48:34,177 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T03:48:34,177 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:48:34,177 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T03:48:34,177 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T03:48:34,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:48:34,180 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:48:34,180 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:48:34,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:48:34,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:48:34,181 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:48:34,182 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:48:34,182 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:48:34,182 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:48:34,183 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:48:34,183 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:48:34,184 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:48:34,184 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:48:34,185 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:48:34,185 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:48:34,186 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-22T03:48:34,186 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:48:34,187 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740 2024-11-22T03:48:34,188 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740 2024-11-22T03:48:34,190 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:48:34,190 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:48:34,190 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:48:34,192 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:48:34,194 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800543, jitterRate=0.01794399321079254}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:48:34,194 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T03:48:34,195 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732247314177Writing region info on filesystem at 1732247314177Initializing all the Stores at 1732247314178 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247314178Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247314178Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247314178Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247314178Cleaning up temporary data from old regions at 1732247314190 (+12 ms)Running coprocessor post-open hooks at 1732247314194 (+4 ms)Region opened successfully at 1732247314195 (+1 ms) 2024-11-22T03:48:34,196 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732247314150 2024-11-22T03:48:34,199 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T03:48:34,199 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T03:48:34,200 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c85114ed5096,38975,1732247312988 2024-11-22T03:48:34,202 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c85114ed5096,38975,1732247312988, state=OPEN 2024-11-22T03:48:34,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:48:34,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:48:34,204 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c85114ed5096,38975,1732247312988 2024-11-22T03:48:34,204 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:48:34,204 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:48:34,206 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T03:48:34,207 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c85114ed5096,38975,1732247312988 in 209 msec 2024-11-22T03:48:34,209 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T03:48:34,209 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 625 msec 2024-11-22T03:48:34,209 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:48:34,209 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T03:48:34,211 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:48:34,211 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c85114ed5096,38975,1732247312988, seqNum=-1] 2024-11-22T03:48:34,211 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:48:34,212 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37739, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:48:34,219 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 675 msec 2024-11-22T03:48:34,219 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732247314219, completionTime=-1 2024-11-22T03:48:34,219 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T03:48:34,219 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T03:48:34,221 INFO [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T03:48:34,221 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732247374221 2024-11-22T03:48:34,221 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732247434221 2024-11-22T03:48:34,221 INFO [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T03:48:34,222 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,33229,1732247312946-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:34,222 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,33229,1732247312946-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:34,222 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,33229,1732247312946-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:34,222 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c85114ed5096:33229, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T03:48:34,222 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:34,223 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T03:48:34,225 DEBUG [master/c85114ed5096:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T03:48:34,227 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.204sec 2024-11-22T03:48:34,227 INFO [master/c85114ed5096:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T03:48:34,227 INFO [master/c85114ed5096:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T03:48:34,227 INFO [master/c85114ed5096:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T03:48:34,227 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T03:48:34,227 INFO [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T03:48:34,227 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,33229,1732247312946-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:48:34,227 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,33229,1732247312946-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T03:48:34,230 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T03:48:34,230 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T03:48:34,230 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,33229,1732247312946-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T03:48:34,313 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35247242, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:48:34,313 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c85114ed5096,33229,-1 for getting cluster id 2024-11-22T03:48:34,313 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T03:48:34,317 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'deb187fc-4c31-426f-a815-e9992112983d' 2024-11-22T03:48:34,317 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T03:48:34,317 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "deb187fc-4c31-426f-a815-e9992112983d" 2024-11-22T03:48:34,318 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54916683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:48:34,318 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c85114ed5096,33229,-1] 2024-11-22T03:48:34,318 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T03:48:34,319 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:48:34,321 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40770, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T03:48:34,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32018167, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:48:34,322 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:48:34,324 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c85114ed5096,38975,1732247312988, seqNum=-1] 2024-11-22T03:48:34,324 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:48:34,326 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38242, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:48:34,329 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c85114ed5096,33229,1732247312946 2024-11-22T03:48:34,329 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:48:34,332 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T03:48:34,332 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-22T03:48:34,332 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-22T03:48:34,332 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T03:48:34,333 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is c85114ed5096,33229,1732247312946 2024-11-22T03:48:34,333 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@49669922 2024-11-22T03:48:34,333 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T03:48:34,335 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40772, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T03:48:34,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T03:48:34,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
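[editor's note] The two TableDescriptorChecker(321) warnings above name the keys directly: whichever of the table descriptor or the site configuration supplies them, this run uses hbase.hregion.max.filesize at 786432 bytes and hbase.hregion.memstore.flush.size at 8192 bytes, far below production values, so the test can force frequent flushes, rolls and splits. A minimal sketch of setting those same keys, values taken verbatim from the warnings:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallRegionSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // intentionally tiny values, matching the warnings logged above
        conf.setLong("hbase.hregion.max.filesize", 786432L);
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
      }
    }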
2024-11-22T03:48:34,336 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:48:34,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T03:48:34,339 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T03:48:34,340 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:48:34,340 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-22T03:48:34,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:48:34,341 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T03:48:34,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33109 is added to blk_1073741835_1011 (size=395) 2024-11-22T03:48:34,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42569 is added to blk_1073741835_1011 (size=395) 2024-11-22T03:48:34,353 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4cab042691babe1517289fb93d98f2ae, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394 2024-11-22T03:48:34,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33109 is added to blk_1073741836_1012 (size=78) 2024-11-22T03:48:34,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42569 is added to blk_1073741836_1012 (size=78) 2024-11-22T03:48:34,360 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:48:34,360 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 4cab042691babe1517289fb93d98f2ae, disabling compactions & flushes 2024-11-22T03:48:34,360 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae. 2024-11-22T03:48:34,360 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae. 2024-11-22T03:48:34,360 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae. after waiting 0 ms 2024-11-22T03:48:34,360 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae. 2024-11-22T03:48:34,360 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae. 2024-11-22T03:48:34,360 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4cab042691babe1517289fb93d98f2ae: Waiting for close lock at 1732247314360Disabling compacts and flushes for region at 1732247314360Disabling writes for close at 1732247314360Writing region close event to WAL at 1732247314360Closed at 1732247314360 2024-11-22T03:48:34,362 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T03:48:34,362 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732247314362"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732247314362"}]},"ts":"1732247314362"} 2024-11-22T03:48:34,364 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-22T03:48:34,365 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T03:48:34,365 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732247314365"}]},"ts":"1732247314365"} 2024-11-22T03:48:34,367 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-22T03:48:34,368 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4cab042691babe1517289fb93d98f2ae, ASSIGN}] 2024-11-22T03:48:34,369 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4cab042691babe1517289fb93d98f2ae, ASSIGN 2024-11-22T03:48:34,370 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4cab042691babe1517289fb93d98f2ae, ASSIGN; state=OFFLINE, location=c85114ed5096,38975,1732247312988; forceNewPlan=false, retain=false 2024-11-22T03:48:34,521 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4cab042691babe1517289fb93d98f2ae, regionState=OPENING, regionLocation=c85114ed5096,38975,1732247312988 2024-11-22T03:48:34,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4cab042691babe1517289fb93d98f2ae, ASSIGN because future has completed 2024-11-22T03:48:34,524 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4cab042691babe1517289fb93d98f2ae, server=c85114ed5096,38975,1732247312988}] 2024-11-22T03:48:34,684 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae. 
2024-11-22T03:48:34,684 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4cab042691babe1517289fb93d98f2ae, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:48:34,685 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 4cab042691babe1517289fb93d98f2ae 2024-11-22T03:48:34,685 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:48:34,685 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4cab042691babe1517289fb93d98f2ae 2024-11-22T03:48:34,685 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4cab042691babe1517289fb93d98f2ae 2024-11-22T03:48:34,687 INFO [StoreOpener-4cab042691babe1517289fb93d98f2ae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4cab042691babe1517289fb93d98f2ae 2024-11-22T03:48:34,688 INFO [StoreOpener-4cab042691babe1517289fb93d98f2ae-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4cab042691babe1517289fb93d98f2ae columnFamilyName info 2024-11-22T03:48:34,689 DEBUG [StoreOpener-4cab042691babe1517289fb93d98f2ae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:48:34,689 INFO [StoreOpener-4cab042691babe1517289fb93d98f2ae-1 {}] regionserver.HStore(327): Store=4cab042691babe1517289fb93d98f2ae/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:48:34,689 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4cab042691babe1517289fb93d98f2ae 2024-11-22T03:48:34,690 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/default/TestLogRolling-testLogRollOnPipelineRestart/4cab042691babe1517289fb93d98f2ae 2024-11-22T03:48:34,691 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/default/TestLogRolling-testLogRollOnPipelineRestart/4cab042691babe1517289fb93d98f2ae 2024-11-22T03:48:34,691 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4cab042691babe1517289fb93d98f2ae 2024-11-22T03:48:34,691 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4cab042691babe1517289fb93d98f2ae 2024-11-22T03:48:34,693 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4cab042691babe1517289fb93d98f2ae 2024-11-22T03:48:34,696 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/default/TestLogRolling-testLogRollOnPipelineRestart/4cab042691babe1517289fb93d98f2ae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:48:34,696 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4cab042691babe1517289fb93d98f2ae; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787006, jitterRate=7.307976484298706E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T03:48:34,697 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4cab042691babe1517289fb93d98f2ae 2024-11-22T03:48:34,698 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4cab042691babe1517289fb93d98f2ae: Running coprocessor pre-open hook at 1732247314685Writing region info on filesystem at 1732247314685Initializing all the Stores at 1732247314686 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247314687 (+1 ms)Cleaning up temporary data from old regions at 1732247314691 (+4 ms)Running coprocessor post-open hooks at 1732247314697 (+6 ms)Region opened successfully at 1732247314698 (+1 ms) 2024-11-22T03:48:34,699 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae., pid=6, masterSystemTime=1732247314677 2024-11-22T03:48:34,701 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae. 2024-11-22T03:48:34,702 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae. 2024-11-22T03:48:34,703 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4cab042691babe1517289fb93d98f2ae, regionState=OPEN, openSeqNum=2, regionLocation=c85114ed5096,38975,1732247312988 2024-11-22T03:48:34,705 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4cab042691babe1517289fb93d98f2ae, server=c85114ed5096,38975,1732247312988 because future has completed 2024-11-22T03:48:34,709 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T03:48:34,709 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4cab042691babe1517289fb93d98f2ae, server=c85114ed5096,38975,1732247312988 in 182 msec 2024-11-22T03:48:34,711 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T03:48:34,711 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4cab042691babe1517289fb93d98f2ae, ASSIGN in 342 msec 2024-11-22T03:48:34,712 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T03:48:34,712 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732247314712"}]},"ts":"1732247314712"} 2024-11-22T03:48:34,714 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-22T03:48:34,715 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T03:48:34,717 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 379 msec 2024-11-22T03:48:34,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:34,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:35,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:35,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:36,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:36,782 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:37,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:37,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:38,749 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T03:48:38,750 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T03:48:38,752 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T03:48:38,752 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-22T03:48:38,754 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:48:38,754 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T03:48:38,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:38,785 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:39,685 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:48:39,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:39,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:39,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:39,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:39,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:39,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:39,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:39,713 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:39,713 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:39,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:48:39,720 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T03:48:39,720 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-22T03:48:39,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:39,785 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:40,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:40,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:41,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:41,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:42,782 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:42,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:43,783 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:43,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:44,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33229 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:48:44,417 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-22T03:48:44,418 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-22T03:48:44,424 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T03:48:44,424 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae. 2024-11-22T03:48:44,429 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae., hostname=c85114ed5096,38975,1732247312988, seqNum=2] 2024-11-22T03:48:44,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:44,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:45,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:45,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:46,433 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247313773 2024-11-22T03:48:46,434 WARN [ResponseProcessor for block BP-894353976-172.17.0.2-1732247312367:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-894353976-172.17.0.2-1732247312367:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:46,434 WARN [ResponseProcessor for block BP-894353976-172.17.0.2-1732247312367:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-894353976-172.17.0.2-1732247312367:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-894353976-172.17.0.2-1732247312367:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:42569,DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:46,435 WARN [ResponseProcessor for block BP-894353976-172.17.0.2-1732247312367:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-894353976-172.17.0.2-1732247312367:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-894353976-172.17.0.2-1732247312367:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:42569,DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:46,435 WARN [DataStreamer for file /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247313773 block BP-894353976-172.17.0.2-1732247312367:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-894353976-172.17.0.2-1732247312367:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42569,DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822,DISK], DatanodeInfoWithStorage[127.0.0.1:33109,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42569,DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822,DISK]) is bad. 
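The repeated "Failed invocation ... java.lang.reflect.InvocationTargetException: null" warnings above come from RecoverLeaseFSUtils probing whether the old WAL file is closed via reflection while the FileSystem backing that path has already been shut down. Because Method.invoke wraps whatever the target method throws, the WARN line shows the wrapper (whose message is null) and the real error, "java.io.IOException: Filesystem closed", appears only as the nested cause. The standalone sketch below is not HBase or HDFS code; it only reproduces that wrapping behaviour with a stand-in class and method name.

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Minimal sketch (not HBase/HDFS code): shows why a reflective call to a method
// that throws IOException("Filesystem closed") is logged as
// "InvocationTargetException: null" with the real error nested as the cause.
public class ReflectiveInvokeDemo {

  // Stand-in for an isFileClosed-style probe; name and message only mirror the log.
  public static class FakeFs {
    public boolean isFileClosed(String path) throws IOException {
      throw new IOException("Filesystem closed"); // the underlying client is already shut down
    }
  }

  public static void main(String[] args) throws Exception {
    Method m = FakeFs.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(new FakeFs(), "/some/wal");
    } catch (InvocationTargetException e) {
      System.out.println("wrapper message: " + e.getMessage()); // null -> "InvocationTargetException: null"
      System.out.println("real cause: " + e.getCause());        // java.io.IOException: Filesystem closed
    }
  }
}
```

Running it prints a null wrapper message followed by the nested IOException, which matches the shape of the warnings in this log.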
2024-11-22T03:48:46,436 WARN [DataStreamer for file /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/WALs/c85114ed5096,33229,1732247312946/c85114ed5096%2C33229%2C1732247312946.1732247313484 block BP-894353976-172.17.0.2-1732247312367:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-894353976-172.17.0.2-1732247312367:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33109,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK], DatanodeInfoWithStorage[127.0.0.1:42569,DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42569,DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822,DISK]) is bad. 2024-11-22T03:48:46,436 WARN [DataStreamer for file /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.meta.1732247314168.meta block BP-894353976-172.17.0.2-1732247312367:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-894353976-172.17.0.2-1732247312367:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33109,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK], DatanodeInfoWithStorage[127.0.0.1:42569,DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42569,DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822,DISK]) is bad. 2024-11-22T03:48:46,436 WARN [PacketResponder: BP-894353976-172.17.0.2-1732247312367:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42569] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:46,437 WARN [PacketResponder: BP-894353976-172.17.0.2-1732247312367:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42569] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:46,437 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-672230656_22 at /127.0.0.1:59778 [Receiving block BP-894353976-172.17.0.2-1732247312367:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42569:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59778 dst: /127.0.0.1:42569 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:46,437 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-672230656_22 at /127.0.0.1:52470 [Receiving block BP-894353976-172.17.0.2-1732247312367:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52470 dst: /127.0.0.1:33109 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:46,438 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_932577279_22 at /127.0.0.1:52438 [Receiving block BP-894353976-172.17.0.2-1732247312367:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52438 dst: /127.0.0.1:33109 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:46,438 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-672230656_22 at /127.0.0.1:59784 [Receiving block BP-894353976-172.17.0.2-1732247312367:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42569:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59784 dst: /127.0.0.1:42569 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:46,438 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-672230656_22 at /127.0.0.1:52478 [Receiving block BP-894353976-172.17.0.2-1732247312367:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52478 dst: /127.0.0.1:33109 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:46,438 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_932577279_22 at /127.0.0.1:59742 [Receiving block BP-894353976-172.17.0.2-1732247312367:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42569:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59742 dst: /127.0.0.1:42569 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
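The DataXceiver errors above ("Premature EOF from inputStream" on 127.0.0.1:33109 and ClosedChannelException on 127.0.0.1:42569) are the datanode-side view of the same event: the writer of each WAL block went away mid-packet, so the receiving datanode's read loop hit end-of-stream before a complete packet arrived. The following sketch of a read-exactly loop is illustrative only and is not the Hadoop BlockReceiver/PacketReceiver implementation; the helper name and buffer sizes are made up.

```java
import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

// Illustrative sketch: a block receiver must read a full packet or fail.
// If the peer disconnects partway through, the stream ends before the expected
// length and the loop surfaces an EOFException -- the same shape as the
// "Premature EOF from inputStream" errors logged by the DataXceivers above.
public class ReadFullyDemo {

  static void readExactly(InputStream in, byte[] buf, int len) throws IOException {
    int off = 0;
    while (off < len) {
      int n = in.read(buf, off, len - off);
      if (n < 0) {
        throw new EOFException("Premature EOF: expected " + len + " bytes, got " + off);
      }
      off += n;
    }
  }

  public static void main(String[] args) throws IOException {
    byte[] partialPacket = new byte[10]; // upstream went away after 10 bytes
    byte[] buf = new byte[64];           // receiver expects a 64-byte packet
    try {
      readExactly(new ByteArrayInputStream(partialPacket), buf, buf.length);
    } catch (EOFException e) {
      System.out.println(e.getMessage()); // "Premature EOF: expected 64 bytes, got 10"
    }
  }
}
```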
2024-11-22T03:48:46,441 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5151c3af{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:48:46,441 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9952a37{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:48:46,441 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:48:46,442 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4da18e2b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:48:46,442 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@585ec7cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir/,STOPPED} 2024-11-22T03:48:46,443 WARN [BP-894353976-172.17.0.2-1732247312367 heartbeating to localhost/127.0.0.1:33141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:48:46,443 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T03:48:46,443 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:48:46,443 WARN [BP-894353976-172.17.0.2-1732247312367 heartbeating to localhost/127.0.0.1:33141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-894353976-172.17.0.2-1732247312367 (Datanode Uuid 8f5b92ec-9a64-4f50-bf34-61b6438278c4) service to localhost/127.0.0.1:33141 2024-11-22T03:48:46,444 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data3/current/BP-894353976-172.17.0.2-1732247312367 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:48:46,444 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data4/current/BP-894353976-172.17.0.2-1732247312367 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:48:46,444 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:48:46,452 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:48:46,455 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:48:46,456 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:48:46,456 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:48:46,456 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:48:46,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b0c43a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:48:46,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1cd7b468{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:48:46,550 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c83e722{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/java.io.tmpdir/jetty-localhost-36451-hadoop-hdfs-3_4_1-tests_jar-_-any-6786679023900188581/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:48:46,550 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@79b4e2a9{HTTP/1.1, (http/1.1)}{localhost:36451} 2024-11-22T03:48:46,550 INFO [Time-limited test {}] server.Server(415): Started @163877ms 2024-11-22T03:48:46,551 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:48:46,570 WARN [ResponseProcessor for block BP-894353976-172.17.0.2-1732247312367:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-894353976-172.17.0.2-1732247312367:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:46,570 WARN [ResponseProcessor for block BP-894353976-172.17.0.2-1732247312367:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-894353976-172.17.0.2-1732247312367:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:46,570 WARN [ResponseProcessor for block BP-894353976-172.17.0.2-1732247312367:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-894353976-172.17.0.2-1732247312367:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:46,570 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_932577279_22 at /127.0.0.1:60018 [Receiving block BP-894353976-172.17.0.2-1732247312367:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60018 dst: /127.0.0.1:33109 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
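The ResponseProcessor exceptions above and the earlier "Error Recovery for ... datanode N ... is bad" messages describe the client-side half of pipeline recovery: when the ack stream for a block breaks, the HDFS client singles out the datanode it blames and keeps writing through the remaining replicas. The sketch below only illustrates that exclusion step with hypothetical names; the real decision logic lives in the HDFS client's DataStreamer and is considerably more involved (it also tracks restarting nodes and may request replacement datanodes).

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Hedged sketch of the recovery decision logged above ("datanode N ... is bad"):
// given the current write pipeline and the index of the node whose ack failed,
// continue with the remaining replicas. Names and types here are hypothetical.
public class PipelineRecoverySketch {

  static List<String> excludeBadNode(List<String> pipeline, int badIndex) {
    List<String> remaining = new ArrayList<>(pipeline);
    remaining.remove(badIndex); // drop the datanode that produced the bad or missing ack
    return remaining;
  }

  public static void main(String[] args) {
    List<String> pipeline = Arrays.asList("127.0.0.1:42569", "127.0.0.1:33109");
    // The log blames datanode 0 for blk_1073741833, so recovery keeps node 1.
    System.out.println(excludeBadNode(pipeline, 0)); // [127.0.0.1:33109]
  }
}
```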
2024-11-22T03:48:46,570 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-672230656_22 at /127.0.0.1:60008 [Receiving block BP-894353976-172.17.0.2-1732247312367:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60008 dst: /127.0.0.1:33109 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:46,570 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-672230656_22 at /127.0.0.1:59996 [Receiving block BP-894353976-172.17.0.2-1732247312367:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33109:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59996 dst: /127.0.0.1:33109 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:46,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@265efa03{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:48:46,572 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34714045{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:48:46,572 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:48:46,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@512c947f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:48:46,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3cc9fb65{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir/,STOPPED} 2024-11-22T03:48:46,577 WARN [BP-894353976-172.17.0.2-1732247312367 heartbeating to localhost/127.0.0.1:33141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:48:46,577 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
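Throughout this section the Close-WAL-Writer-0 thread logs the same failure roughly once per second (03:48:41 through 03:48:46, and again just below): lease recovery on the pre-restart WALs is retried on an interval until the file reports closed, and here every probe fails immediately because the client behind those hdfs://localhost:43749 paths was already closed. A hedged sketch of that style of retry loop follows; the method and probe names are placeholders, not the RecoverLeaseFSUtils API.

```java
import java.io.IOException;
import java.util.concurrent.Callable;

// Hedged sketch of the retry pattern behind the repeating
// "Failed invocation for hdfs://..." warnings: an isFileClosed-style probe is
// retried on a fixed interval until it reports true or the loop gives up.
// The probe below is a placeholder; in this log every attempt throws
// "Filesystem closed" because the old WAL's filesystem was already shut down.
public class LeaseRecoveryRetrySketch {

  static boolean waitUntilClosed(Callable<Boolean> isFileClosedProbe,
                                 long intervalMs, int maxAttempts) throws InterruptedException {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        if (isFileClosedProbe.call()) {
          return true; // lease recovered, file is closed
        }
      } catch (Exception e) {
        // Mirrors the WARN in the log: record the failure and keep retrying.
        System.out.println("attempt " + attempt + " failed: " + e);
      }
      Thread.sleep(intervalMs);
    }
    return false;
  }

  public static void main(String[] args) throws InterruptedException {
    // A probe that always fails, like the closed-filesystem case in this log.
    boolean closed = waitUntilClosed(
        () -> { throw new IOException("Filesystem closed"); }, 1000, 3);
    System.out.println("file closed: " + closed);
  }
}
```

With a probe that always throws, the loop simply logs each attempt and stops after maxAttempts, which is the pattern visible in these warnings.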
2024-11-22T03:48:46,577 WARN [BP-894353976-172.17.0.2-1732247312367 heartbeating to localhost/127.0.0.1:33141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-894353976-172.17.0.2-1732247312367 (Datanode Uuid 9db1947b-5ca6-458d-a2e7-c7d00008effd) service to localhost/127.0.0.1:33141 2024-11-22T03:48:46,577 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:48:46,578 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data1/current/BP-894353976-172.17.0.2-1732247312367 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:48:46,578 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data2/current/BP-894353976-172.17.0.2-1732247312367 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:48:46,578 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:48:46,589 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:48:46,593 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:48:46,594 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:48:46,594 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:48:46,594 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:48:46,594 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59ca75fb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:48:46,595 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50b0b859{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:48:46,627 WARN [Thread-1338 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:48:46,629 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3ebca5ad65857ee with lease ID 0x5982d97bd824242a: from storage DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822 node DatanodeRegistration(127.0.0.1:34431, datanodeUuid=8f5b92ec-9a64-4f50-bf34-61b6438278c4, infoPort=44781, infoSecurePort=0, ipcPort=41097, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:48:46,630 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3ebca5ad65857ee with lease ID 0x5982d97bd824242a: from storage DS-130457d9-dfff-42bc-9e58-276a248b05b0 node DatanodeRegistration(127.0.0.1:34431, datanodeUuid=8f5b92ec-9a64-4f50-bf34-61b6438278c4, infoPort=44781, infoSecurePort=0, ipcPort=41097, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:48:46,695 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6086fda4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/java.io.tmpdir/jetty-localhost-45811-hadoop-hdfs-3_4_1-tests_jar-_-any-17676675004507930935/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:48:46,695 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ccc23dd{HTTP/1.1, (http/1.1)}{localhost:45811} 2024-11-22T03:48:46,696 INFO [Time-limited test {}] server.Server(415): Started @164022ms 2024-11-22T03:48:46,697 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:48:46,762 WARN [Thread-1369 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:48:46,764 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64102a6b4387b2e2 with lease ID 0x5982d97bd824242b: from storage DS-617819d4-916b-4576-9dac-8f869afd216e node DatanodeRegistration(127.0.0.1:42281, datanodeUuid=9db1947b-5ca6-458d-a2e7-c7d00008effd, infoPort=44953, infoSecurePort=0, ipcPort=34119, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:48:46,764 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64102a6b4387b2e2 with lease ID 0x5982d97bd824242b: from storage DS-86215298-ed51-4f3a-88ce-ab012dccf361 node DatanodeRegistration(127.0.0.1:42281, datanodeUuid=9db1947b-5ca6-458d-a2e7-c7d00008effd, infoPort=44953, infoSecurePort=0, ipcPort=34119, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:48:46,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:48:46,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:47,714 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-22T03:48:47,721 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-22T03:48:47,725 ERROR [FSHLog-0-hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394-prefix:c85114ed5096,38975,1732247312988 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33109,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:48:47,725 WARN [FSHLog-0-hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394-prefix:c85114ed5096,38975,1732247312988 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33109,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T03:48:47,725 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c85114ed5096%2C38975%2C1732247312988:(num 1732247313773) roll requested
2024-11-22T03:48:47,725 INFO [regionserver/c85114ed5096:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C38975%2C1732247312988.1732247327725
2024-11-22T03:48:47,734 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247313773 newFile=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725
2024-11-22T03:48:47,734 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:48:47,734 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:48:47,735 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:48:47,735 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:48:47,735 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:48:47,735 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247313773 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725
2024-11-22T03:48:47,736 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33109,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T03:48:47,737 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33109,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:47,737 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247313773 2024-11-22T03:48:47,737 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44781:44781),(127.0.0.1/127.0.0.1:44953:44953)] 2024-11-22T03:48:47,737 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247313773 is not closed yet, will try archiving it next time 2024-11-22T03:48:47,737 WARN [IPC Server handler 3 on default port 33141 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247313773 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-22T03:48:47,738 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247313773 after 1ms 2024-11-22T03:48:47,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:47,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:48:48,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:48,795 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:49,743 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-22T03:48:49,790 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:48:49,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:50,631 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-22T03:48:50,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:50,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:51,740 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247313773 after 4003ms 2024-11-22T03:48:51,749 WARN [ResponseProcessor for block BP-894353976-172.17.0.2-1732247312367:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-894353976-172.17.0.2-1732247312367:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-894353976-172.17.0.2-1732247312367:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:42281,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:51,750 WARN [DataStreamer for file /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725 block BP-894353976-172.17.0.2-1732247312367:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-894353976-172.17.0.2-1732247312367:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34431,DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822,DISK], DatanodeInfoWithStorage[127.0.0.1:42281,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42281,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]) is bad. 
2024-11-22T03:48:51,750 WARN [PacketResponder: BP-894353976-172.17.0.2-1732247312367:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42281] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:51,751 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-672230656_22 at /127.0.0.1:35224 [Receiving block BP-894353976-172.17.0.2-1732247312367:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:34431:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35224 dst: /127.0.0.1:34431 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:51,752 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-672230656_22 at /127.0.0.1:41522 [Receiving block BP-894353976-172.17.0.2-1732247312367:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42281:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41522 dst: /127.0.0.1:42281 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:48:51,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6086fda4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-22T03:48:51,755 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ccc23dd{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-22T03:48:51,755 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-22T03:48:51,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50b0b859{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-22T03:48:51,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59ca75fb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir/,STOPPED}
2024-11-22T03:48:51,757 WARN [BP-894353976-172.17.0.2-1732247312367 heartbeating to localhost/127.0.0.1:33141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-22T03:48:51,757 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-22T03:48:51,757 WARN [BP-894353976-172.17.0.2-1732247312367 heartbeating to localhost/127.0.0.1:33141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-894353976-172.17.0.2-1732247312367 (Datanode Uuid 9db1947b-5ca6-458d-a2e7-c7d00008effd) service to localhost/127.0.0.1:33141
2024-11-22T03:48:51,757 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-22T03:48:51,758 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data1/current/BP-894353976-172.17.0.2-1732247312367 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T03:48:51,758 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data2/current/BP-894353976-172.17.0.2-1732247312367 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T03:48:51,758 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-22T03:48:51,766 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:48:51,771 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:48:51,771 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:48:51,771 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:48:51,771 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:48:51,772 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26a107ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:48:51,772 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37f01e59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:48:51,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-22T03:48:51,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:51,866 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@710628af{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/java.io.tmpdir/jetty-localhost-39679-hadoop-hdfs-3_4_1-tests_jar-_-any-9943647685838696243/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:48:51,867 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@797b72a2{HTTP/1.1, (http/1.1)}{localhost:39679} 2024-11-22T03:48:51,867 INFO [Time-limited test {}] server.Server(415): Started @169193ms 2024-11-22T03:48:51,868 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-22T03:48:51,884 WARN [ResponseProcessor for block BP-894353976-172.17.0.2-1732247312367:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-894353976-172.17.0.2-1732247312367:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:51,885 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-672230656_22 at /127.0.0.1:41988 [Receiving block BP-894353976-172.17.0.2-1732247312367:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:34431:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41988 dst: /127.0.0.1:34431 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T03:48:51,890 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c83e722{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:48:51,890 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@79b4e2a9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:48:51,890 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:48:51,891 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1cd7b468{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:48:51,891 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b0c43a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir/,STOPPED} 2024-11-22T03:48:51,892 WARN [BP-894353976-172.17.0.2-1732247312367 heartbeating to localhost/127.0.0.1:33141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:48:51,892 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T03:48:51,892 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:48:51,892 WARN [BP-894353976-172.17.0.2-1732247312367 heartbeating to localhost/127.0.0.1:33141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-894353976-172.17.0.2-1732247312367 (Datanode Uuid 8f5b92ec-9a64-4f50-bf34-61b6438278c4) service to localhost/127.0.0.1:33141 2024-11-22T03:48:51,892 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data3/current/BP-894353976-172.17.0.2-1732247312367 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:48:51,893 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data4/current/BP-894353976-172.17.0.2-1732247312367 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:48:51,893 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:48:51,905 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:48:51,908 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:48:51,912 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:48:51,912 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:48:51,913 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:48:51,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5778aad8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:48:51,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68208c52{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:48:51,940 WARN [Thread-1412 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:48:51,942 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc525529d53c652b4 with lease ID 0x5982d97bd824242c: from storage DS-617819d4-916b-4576-9dac-8f869afd216e node DatanodeRegistration(127.0.0.1:38737, datanodeUuid=9db1947b-5ca6-458d-a2e7-c7d00008effd, infoPort=40797, infoSecurePort=0, ipcPort=44793, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:48:51,943 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc525529d53c652b4 with lease ID 0x5982d97bd824242c: from storage DS-86215298-ed51-4f3a-88ce-ab012dccf361 node DatanodeRegistration(127.0.0.1:38737, datanodeUuid=9db1947b-5ca6-458d-a2e7-c7d00008effd, infoPort=40797, infoSecurePort=0, ipcPort=44793, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:48:52,008 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14081bd9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/java.io.tmpdir/jetty-localhost-34701-hadoop-hdfs-3_4_1-tests_jar-_-any-7579372085248699276/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:48:52,008 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e7f9298{HTTP/1.1, (http/1.1)}{localhost:34701} 2024-11-22T03:48:52,009 INFO [Time-limited test {}] server.Server(415): Started @169335ms 2024-11-22T03:48:52,010 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
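Note: the AuthenticationFilter warning above is benign in this test run; the signature secret file under /home/jenkins does not exist, so the embedded Jetty HTTP endpoints fall back to random signing secrets. The sketch below shows how a real deployment would point the filter at a secret file. The property name follows Hadoop's hadoop.http.authentication.* convention, and the path is an example, not the path from the log.

```java
// Sketch only: supplying the signature secret file so AuthenticationFilter
// does not fall back to random secrets.
import org.apache.hadoop.conf.Configuration;

public class HttpAuthSecretSketch {
  public static Configuration withSignatureSecret(Configuration conf) {
    conf.set("hadoop.http.authentication.signature.secret.file",
        "/etc/hadoop/http-auth-signature-secret"); // file must exist and be readable
    return conf;
  }
}
```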
2024-11-22T03:48:52,075 WARN [Thread-1443 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:48:52,078 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaf3a0ca473d7ae2c with lease ID 0x5982d97bd824242d: from storage DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822 node DatanodeRegistration(127.0.0.1:34323, datanodeUuid=8f5b92ec-9a64-4f50-bf34-61b6438278c4, infoPort=43009, infoSecurePort=0, ipcPort=33267, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:48:52,078 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaf3a0ca473d7ae2c with lease ID 0x5982d97bd824242d: from storage DS-130457d9-dfff-42bc-9e58-276a248b05b0 node DatanodeRegistration(127.0.0.1:34323, datanodeUuid=8f5b92ec-9a64-4f50-bf34-61b6438278c4, infoPort=43009, infoSecurePort=0, ipcPort=33267, storageInfo=lv=-57;cid=testClusterID;nsid=2072024911;c=1732247312367), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:48:52,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:48:52,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:53,028 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-22T03:48:53,033 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-22T03:48:53,036 ERROR [FSHLog-0-hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394-prefix:c85114ed5096,38975,1732247312988 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34431,DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
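Note: the appendAndSync failure above ("All datanodes [...127.0.0.1:34431...] are bad. Aborting...") happens because the FSHLog writer still holds the pre-restart pipeline; the entries that follow show the roller requesting a roll and a new writer being created on the restarted pipeline (40797/43009). The sketch below illustrates that append-fails-then-roll pattern with deliberately hypothetical interfaces; WalWriter and WalRoller are illustrative stand-ins, not the org.apache.hadoop.hbase.wal API.

```java
// Hypothetical sketch of the append-fails-then-roll pattern visible in this log.
import java.io.IOException;

interface WalWriter {
  void append(byte[] edit) throws IOException; // may throw "All datanodes ... are bad"
}

interface WalRoller {
  WalWriter rollWriter() throws IOException;   // opens a writer on a fresh pipeline
}

final class AppendWithRoll {
  static void appendOrRoll(WalRoller roller, WalWriter writer, byte[] edit) throws IOException {
    try {
      writer.append(edit);
    } catch (IOException pipelineDead) {
      // The old pipeline is gone (datanodes restarted); roll to a new file and retry once.
      WalWriter fresh = roller.rollWriter();
      fresh.append(edit);
    }
  }
}
```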
2024-11-22T03:48:53,036 WARN [FSHLog-0-hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394-prefix:c85114ed5096,38975,1732247312988 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34431,DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:53,036 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c85114ed5096%2C38975%2C1732247312988:(num 1732247327725) roll requested 2024-11-22T03:48:53,037 INFO [regionserver/c85114ed5096:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C38975%2C1732247312988.1732247333037 2024-11-22T03:48:53,043 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725 newFile=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247333037 2024-11-22T03:48:53,043 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:53,044 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:53,044 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:53,044 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:53,044 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:53,044 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247333037 2024-11-22T03:48:53,044 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34431,DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:48:53,045 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34431,DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:53,045 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725 2024-11-22T03:48:53,045 WARN [IPC Server handler 1 on default port 33141 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-22T03:48:53,046 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725 after 1ms 2024-11-22T03:48:53,046 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40797:40797),(127.0.0.1/127.0.0.1:43009:43009)] 2024-11-22T03:48:53,046 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725 is not closed yet, will try archiving it next time 2024-11-22T03:48:53,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:53,800 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:48:53,944 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-22T03:48:54,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:54,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:48:55,048 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C38975%2C1732247312988.1732247335048 2024-11-22T03:48:55,061 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247333037 newFile=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 2024-11-22T03:48:55,061 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:55,061 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:55,061 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:55,062 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:55,062 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:55,062 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247333037 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 2024-11-22T03:48:55,063 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43009:43009),(127.0.0.1/127.0.0.1:40797:40797)] 2024-11-22T03:48:55,063 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725 is not closed yet, will try archiving it next time 2024-11-22T03:48:55,063 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247333037 is not closed yet, will try archiving it next time 2024-11-22T03:48:55,064 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247313773 2024-11-22T03:48:55,064 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247313773 2024-11-22T03:48:55,065 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247313773 after 1ms 2024-11-22T03:48:55,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_1073741838_1019 (size=1264) 2024-11-22T03:48:55,065 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247313773 2024-11-22T03:48:55,065 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38737 is added to blk_1073741838_1019 (size=1264) 2024-11-22T03:48:55,066 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725 is not closed yet, will try archiving it next time 2024-11-22T03:48:55,074 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732247314698/Put/vlen=218/seqid=0] 2024-11-22T03:48:55,074 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732247324430/Put/vlen=1045/seqid=0] 2024-11-22T03:48:55,075 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247313773 2024-11-22T03:48:55,075 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725 2024-11-22T03:48:55,075 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725 2024-11-22T03:48:55,075 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725 after 0ms 2024-11-22T03:48:55,075 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725 2024-11-22T03:48:55,078 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732247327724/Put/vlen=1045/seqid=0] 2024-11-22T03:48:55,078 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732247329745/Put/vlen=1045/seqid=0] 2024-11-22T03:48:55,078 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725 2024-11-22T03:48:55,078 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247333037 2024-11-22T03:48:55,078 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247333037 2024-11-22T03:48:55,079 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247333037 after 1ms 2024-11-22T03:48:55,079 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247333037 2024-11-22T03:48:55,082 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732247333036/Put/vlen=1045/seqid=0] 2024-11-22T03:48:55,082 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 2024-11-22T03:48:55,082 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 2024-11-22T03:48:55,082 WARN [IPC Server handler 2 on default port 33141 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-22T03:48:55,082 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 after 0ms 2024-11-22T03:48:55,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:55,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:56,081 WARN [ResponseProcessor for block BP-894353976-172.17.0.2-1732247312367:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-894353976-172.17.0.2-1732247312367:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
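Note: the RecoverLeaseFSUtils entries in this stretch show the retry shape: attempt=0 fails while the namenode still reports "Lease recovery is in progress", and a later attempt succeeds ("Recovered lease, attempt=1 ... after 4003ms"). Below is a minimal sketch of that loop using the real DistributedFileSystem.recoverLease and isFileClosed calls. It is not HBase's RecoverLeaseFSUtils; the sleep and deadline values are arbitrary choices for illustration.

```java
// Minimal sketch of the lease-recovery retry loop reflected in the
// RecoverLeaseFSUtils entries above.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  public static boolean recover(DistributedFileSystem dfs, Path wal) throws Exception {
    long deadline = System.currentTimeMillis() + 60_000L;       // arbitrary budget
    for (int attempt = 0; System.currentTimeMillis() < deadline; attempt++) {
      // recoverLease() returns true once the file is closed and the lease released.
      if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
        return true;               // e.g. "Recovered lease, attempt=1 ... after 4003ms"
      }
      Thread.sleep(4_000L);        // namenode reported "Lease recovery is in progress"
    }
    return false;
  }
}
```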
2024-11-22T03:48:56,081 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_932577279_22 at /127.0.0.1:47310 [Receiving block BP-894353976-172.17.0.2-1732247312367:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34323:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47310 dst: /127.0.0.1:34323 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:34323 remote=/127.0.0.1:47310]. Total timeout mills is 60000, 58979 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:56,082 WARN [DataStreamer for file /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 block BP-894353976-172.17.0.2-1732247312367:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-894353976-172.17.0.2-1732247312367:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34323,DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822,DISK], DatanodeInfoWithStorage[127.0.0.1:38737,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34323,DS-c90cdbcd-6b2e-40ec-a4e7-6e9799ea3822,DISK]) is bad. 
2024-11-22T03:48:56,082 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_932577279_22 at /127.0.0.1:40818 [Receiving block BP-894353976-172.17.0.2-1732247312367:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:38737:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40818 dst: /127.0.0.1:38737 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:48:56,089 WARN [DataStreamer for file /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 block BP-894353976-172.17.0.2-1732247312367:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-894353976-172.17.0.2-1732247312367:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:56,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_1073741839_1022 (size=85) 2024-11-22T03:48:56,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38737 is added to blk_1073741839_1022 (size=85) 2024-11-22T03:48:56,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:56,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:48:57,048 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247327725 after 4003ms 2024-11-22T03:48:57,800 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:57,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:58,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:58,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:48:59,083 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 after 4001ms 2024-11-22T03:48:59,083 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 2024-11-22T03:48:59,087 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 2024-11-22T03:48:59,087 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 4cab042691babe1517289fb93d98f2ae 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-22T03:48:59,087 ERROR [FSHLog-0-hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394-prefix:c85114ed5096,38975,1732247312988 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-894353976-172.17.0.2-1732247312367:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:59,087 WARN [FSHLog-0-hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394-prefix:c85114ed5096,38975,1732247312988 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-894353976-172.17.0.2-1732247312367:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:59,088 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c85114ed5096%2C38975%2C1732247312988:(num 1732247335048) roll requested 2024-11-22T03:48:59,088 INFO [regionserver/c85114ed5096:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C38975%2C1732247312988.1732247339088 2024-11-22T03:48:59,093 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 newFile=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247339088 2024-11-22T03:48:59,094 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:59,094 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:59,094 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:59,094 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:59,094 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:59,094 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247339088 2024-11-22T03:48:59,094 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-894353976-172.17.0.2-1732247312367:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:59,095 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40797:40797),(127.0.0.1/127.0.0.1:43009:43009)] 2024-11-22T03:48:59,095 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-894353976-172.17.0.2-1732247312367:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:48:59,095 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 is not closed yet, will try archiving it next time 2024-11-22T03:48:59,095 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 2024-11-22T03:48:59,096 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 after 1ms 2024-11-22T03:48:59,096 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.1732247335048 to hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/oldWALs/c85114ed5096%2C38975%2C1732247312988.1732247335048 2024-11-22T03:48:59,112 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/default/TestLogRolling-testLogRollOnPipelineRestart/4cab042691babe1517289fb93d98f2ae/.tmp/info/9c04ce1991384529948031e40fe3feb4 is 1080, key is row1002/info:/1732247324430/Put/seqid=0 2024-11-22T03:48:59,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38737 is added to blk_1073741841_1024 (size=9270) 2024-11-22T03:48:59,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_1073741841_1024 (size=9270) 2024-11-22T03:48:59,117 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/default/TestLogRolling-testLogRollOnPipelineRestart/4cab042691babe1517289fb93d98f2ae/.tmp/info/9c04ce1991384529948031e40fe3feb4 2024-11-22T03:48:59,123 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/default/TestLogRolling-testLogRollOnPipelineRestart/4cab042691babe1517289fb93d98f2ae/.tmp/info/9c04ce1991384529948031e40fe3feb4 as hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/default/TestLogRolling-testLogRollOnPipelineRestart/4cab042691babe1517289fb93d98f2ae/info/9c04ce1991384529948031e40fe3feb4 2024-11-22T03:48:59,128 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/default/TestLogRolling-testLogRollOnPipelineRestart/4cab042691babe1517289fb93d98f2ae/info/9c04ce1991384529948031e40fe3feb4, entries=4, sequenceid=8, filesize=9.1 K 2024-11-22T03:48:59,129 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 4cab042691babe1517289fb93d98f2ae in 42ms, sequenceid=8, compaction requested=false 
2024-11-22T03:48:59,129 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 4cab042691babe1517289fb93d98f2ae: 2024-11-22T03:48:59,129 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-22T03:48:59,130 ERROR [FSHLog-0-hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394-prefix:c85114ed5096,38975,1732247312988.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33109,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:59,130 WARN [FSHLog-0-hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394-prefix:c85114ed5096,38975,1732247312988.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33109,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:48:59,130 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c85114ed5096%2C38975%2C1732247312988.meta:.meta(num 1732247314168) roll requested 2024-11-22T03:48:59,130 INFO [regionserver/c85114ed5096:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C38975%2C1732247312988.meta.1732247339130.meta 2024-11-22T03:48:59,135 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:59,135 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:59,135 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:59,135 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:59,135 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:59,135 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.meta.1732247314168.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.meta.1732247339130.meta 2024-11-22T03:48:59,136 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33109,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:48:59,136 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33109,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:48:59,136 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.meta.1732247314168.meta 2024-11-22T03:48:59,136 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43009:43009),(127.0.0.1/127.0.0.1:40797:40797)] 2024-11-22T03:48:59,136 DEBUG [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.meta.1732247314168.meta is not closed yet, will try archiving it next time 2024-11-22T03:48:59,136 WARN [IPC Server handler 4 on default port 33141 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.meta.1732247314168.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013 2024-11-22T03:48:59,137 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.meta.1732247314168.meta after 0ms 2024-11-22T03:48:59,154 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/.tmp/info/f993a8cca18f4bd182e2785ed348b118 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae./info:regioninfo/1732247314703/Put/seqid=0 2024-11-22T03:48:59,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_1073741843_1027 (size=7125) 2024-11-22T03:48:59,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38737 is added to blk_1073741843_1027 (size=7125) 2024-11-22T03:48:59,159 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/.tmp/info/f993a8cca18f4bd182e2785ed348b118 2024-11-22T03:48:59,179 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/.tmp/ns/653ef794c4f64f3f90e7e95034719e60 is 43, key is default/ns:d/1732247314213/Put/seqid=0 2024-11-22T03:48:59,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38737 is added to blk_1073741844_1028 (size=5153) 2024-11-22T03:48:59,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_1073741844_1028 (size=5153) 2024-11-22T03:48:59,185 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/.tmp/ns/653ef794c4f64f3f90e7e95034719e60 2024-11-22T03:48:59,204 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/.tmp/table/918b62cd1fb244d38beb9174d09fd5b6 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732247314712/Put/seqid=0 2024-11-22T03:48:59,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38737 is added to blk_1073741845_1029 (size=5438) 2024-11-22T03:48:59,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_1073741845_1029 (size=5438) 2024-11-22T03:48:59,209 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/.tmp/table/918b62cd1fb244d38beb9174d09fd5b6 2024-11-22T03:48:59,214 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/.tmp/info/f993a8cca18f4bd182e2785ed348b118 as hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/info/f993a8cca18f4bd182e2785ed348b118 2024-11-22T03:48:59,219 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/info/f993a8cca18f4bd182e2785ed348b118, entries=10, sequenceid=11, filesize=7.0 K 2024-11-22T03:48:59,220 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/.tmp/ns/653ef794c4f64f3f90e7e95034719e60 as hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/ns/653ef794c4f64f3f90e7e95034719e60 2024-11-22T03:48:59,226 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/ns/653ef794c4f64f3f90e7e95034719e60, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T03:48:59,227 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/.tmp/table/918b62cd1fb244d38beb9174d09fd5b6 as hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/table/918b62cd1fb244d38beb9174d09fd5b6 2024-11-22T03:48:59,232 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/table/918b62cd1fb244d38beb9174d09fd5b6, entries=2, sequenceid=11, filesize=5.3 K 2024-11-22T03:48:59,233 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 104ms, sequenceid=11, compaction requested=false 2024-11-22T03:48:59,233 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-22T03:48:59,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T03:48:59,238 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T03:48:59,238 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:48:59,238 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:48:59,239 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:48:59,239 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T03:48:59,239 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T03:48:59,239 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=792140689, stopped=false 2024-11-22T03:48:59,239 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c85114ed5096,33229,1732247312946 2024-11-22T03:48:59,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:48:59,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:48:59,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:59,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:48:59,240 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:48:59,241 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T03:48:59,241 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:48:59,241 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:48:59,241 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:48:59,241 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:48:59,241 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c85114ed5096,38975,1732247312988' ***** 2024-11-22T03:48:59,241 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T03:48:59,241 INFO [RS:0;c85114ed5096:38975 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T03:48:59,241 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T03:48:59,241 INFO [RS:0;c85114ed5096:38975 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T03:48:59,241 INFO [RS:0;c85114ed5096:38975 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T03:48:59,242 INFO [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(3091): Received CLOSE for 4cab042691babe1517289fb93d98f2ae 2024-11-22T03:48:59,242 INFO [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(959): stopping server c85114ed5096,38975,1732247312988 2024-11-22T03:48:59,242 INFO [RS:0;c85114ed5096:38975 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:48:59,242 INFO [RS:0;c85114ed5096:38975 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c85114ed5096:38975. 
2024-11-22T03:48:59,242 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4cab042691babe1517289fb93d98f2ae, disabling compactions & flushes 2024-11-22T03:48:59,242 DEBUG [RS:0;c85114ed5096:38975 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:48:59,242 DEBUG [RS:0;c85114ed5096:38975 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:48:59,242 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae. 2024-11-22T03:48:59,242 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae. 2024-11-22T03:48:59,242 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae. after waiting 0 ms 2024-11-22T03:48:59,242 INFO [RS:0;c85114ed5096:38975 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T03:48:59,242 INFO [RS:0;c85114ed5096:38975 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T03:48:59,242 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae. 2024-11-22T03:48:59,242 INFO [RS:0;c85114ed5096:38975 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-22T03:48:59,242 INFO [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T03:48:59,242 INFO [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T03:48:59,242 DEBUG [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(1325): Online Regions={4cab042691babe1517289fb93d98f2ae=TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae., 1588230740=hbase:meta,,1.1588230740} 2024-11-22T03:48:59,242 DEBUG [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4cab042691babe1517289fb93d98f2ae 2024-11-22T03:48:59,242 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:48:59,242 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:48:59,243 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:48:59,243 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:48:59,243 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:48:59,246 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T03:48:59,246 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/data/default/TestLogRolling-testLogRollOnPipelineRestart/4cab042691babe1517289fb93d98f2ae/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-22T03:48:59,247 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae. 
2024-11-22T03:48:59,247 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:48:59,247 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4cab042691babe1517289fb93d98f2ae: Waiting for close lock at 1732247339242Running coprocessor pre-close hooks at 1732247339242Disabling compacts and flushes for region at 1732247339242Disabling writes for close at 1732247339242Writing region close event to WAL at 1732247339243 (+1 ms)Running coprocessor post-close hooks at 1732247339247 (+4 ms)Closed at 1732247339247 2024-11-22T03:48:59,247 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:48:59,247 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732247339242Running coprocessor pre-close hooks at 1732247339242Disabling compacts and flushes for region at 1732247339242Disabling writes for close at 1732247339243 (+1 ms)Writing region close event to WAL at 1732247339244 (+1 ms)Running coprocessor post-close hooks at 1732247339247 (+3 ms)Closed at 1732247339247 2024-11-22T03:48:59,247 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732247314336.4cab042691babe1517289fb93d98f2ae. 2024-11-22T03:48:59,247 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T03:48:59,443 INFO [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(976): stopping server c85114ed5096,38975,1732247312988; all regions closed. 2024-11-22T03:48:59,443 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:59,443 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:59,443 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:59,443 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:59,443 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:48:59,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38737 is added to blk_1073741842_1025 (size=825) 2024-11-22T03:48:59,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_1073741842_1025 (size=825) 2024-11-22T03:48:59,642 INFO [regionserver/c85114ed5096:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T03:48:59,642 INFO [regionserver/c85114ed5096:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T03:48:59,643 INFO [regionserver/c85114ed5096:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:48:59,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:48:59,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:00,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:00,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:01,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:01,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:02,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:02,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:02,931 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T03:49:03,078 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-22T03:49:03,137 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.meta.1732247314168.meta after 4001ms 2024-11-22T03:49:03,138 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/WALs/c85114ed5096,38975,1732247312988/c85114ed5096%2C38975%2C1732247312988.meta.1732247314168.meta to hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/oldWALs/c85114ed5096%2C38975%2C1732247312988.meta.1732247314168.meta 2024-11-22T03:49:03,141 DEBUG [RS:0;c85114ed5096:38975 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/oldWALs 2024-11-22T03:49:03,141 INFO [RS:0;c85114ed5096:38975 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c85114ed5096%2C38975%2C1732247312988.meta:.meta(num 1732247339130) 2024-11-22T03:49:03,142 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:03,142 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:03,142 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:03,142 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:03,142 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:03,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_1073741840_1023 (size=1162) 2024-11-22T03:49:03,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38737 is added to blk_1073741840_1023 (size=1162) 2024-11-22T03:49:03,152 DEBUG [RS:0;c85114ed5096:38975 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/oldWALs 2024-11-22T03:49:03,152 INFO [RS:0;c85114ed5096:38975 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c85114ed5096%2C38975%2C1732247312988:(num 1732247339088) 2024-11-22T03:49:03,152 DEBUG [RS:0;c85114ed5096:38975 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:49:03,152 INFO [RS:0;c85114ed5096:38975 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:49:03,152 INFO [RS:0;c85114ed5096:38975 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:49:03,153 INFO [RS:0;c85114ed5096:38975 {}] hbase.ChoreService(370): Chore service for: regionserver/c85114ed5096:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-22T03:49:03,153 INFO [RS:0;c85114ed5096:38975 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:49:03,153 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T03:49:03,153 INFO [RS:0;c85114ed5096:38975 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38975 2024-11-22T03:49:03,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c85114ed5096,38975,1732247312988 2024-11-22T03:49:03,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:49:03,155 INFO [RS:0;c85114ed5096:38975 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:49:03,156 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c85114ed5096,38975,1732247312988] 2024-11-22T03:49:03,157 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c85114ed5096,38975,1732247312988 already deleted, retry=false 2024-11-22T03:49:03,157 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c85114ed5096,38975,1732247312988 expired; onlineServers=0 2024-11-22T03:49:03,157 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c85114ed5096,33229,1732247312946' ***** 2024-11-22T03:49:03,157 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T03:49:03,158 INFO [M:0;c85114ed5096:33229 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:49:03,158 INFO [M:0;c85114ed5096:33229 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:49:03,158 DEBUG [M:0;c85114ed5096:33229 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T03:49:03,158 DEBUG [M:0;c85114ed5096:33229 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T03:49:03,158 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T03:49:03,158 DEBUG [master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247313554 {}] cleaner.HFileCleaner(306): Exit Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247313554,5,FailOnTimeoutGroup] 2024-11-22T03:49:03,158 DEBUG [master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247313555 {}] cleaner.HFileCleaner(306): Exit Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247313555,5,FailOnTimeoutGroup] 2024-11-22T03:49:03,158 INFO [M:0;c85114ed5096:33229 {}] hbase.ChoreService(370): Chore service for: master/c85114ed5096:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T03:49:03,158 INFO [M:0;c85114ed5096:33229 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:49:03,158 DEBUG [M:0;c85114ed5096:33229 {}] master.HMaster(1795): Stopping service threads 2024-11-22T03:49:03,158 INFO [M:0;c85114ed5096:33229 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T03:49:03,158 INFO [M:0;c85114ed5096:33229 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:49:03,159 INFO [M:0;c85114ed5096:33229 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T03:49:03,159 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T03:49:03,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T03:49:03,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:49:03,159 DEBUG [M:0;c85114ed5096:33229 {}] zookeeper.ZKUtil(347): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T03:49:03,159 WARN [M:0;c85114ed5096:33229 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T03:49:03,160 INFO [M:0;c85114ed5096:33229 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/.lastflushedseqids 2024-11-22T03:49:03,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38737 is added to blk_1073741846_1030 (size=130) 2024-11-22T03:49:03,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_1073741846_1030 (size=130) 2024-11-22T03:49:03,166 INFO [M:0;c85114ed5096:33229 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T03:49:03,166 INFO [M:0;c85114ed5096:33229 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T03:49:03,166 DEBUG [M:0;c85114ed5096:33229 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:49:03,166 INFO [M:0;c85114ed5096:33229 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:49:03,166 DEBUG [M:0;c85114ed5096:33229 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:49:03,166 DEBUG [M:0;c85114ed5096:33229 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:49:03,166 DEBUG [M:0;c85114ed5096:33229 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:49:03,166 INFO [M:0;c85114ed5096:33229 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-22T03:49:03,166 ERROR [FSHLog-0-hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData-prefix:c85114ed5096,33229,1732247312946 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33109,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:49:03,167 WARN [FSHLog-0-hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData-prefix:c85114ed5096,33229,1732247312946 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33109,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:49:03,167 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog c85114ed5096%2C33229%2C1732247312946:(num 1732247313484) roll requested 2024-11-22T03:49:03,167 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C33229%2C1732247312946.1732247343167 2024-11-22T03:49:03,172 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:03,172 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:03,172 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:03,172 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:03,172 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:03,172 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/WALs/c85114ed5096,33229,1732247312946/c85114ed5096%2C33229%2C1732247312946.1732247313484 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/WALs/c85114ed5096,33229,1732247312946/c85114ed5096%2C33229%2C1732247312946.1732247343167 2024-11-22T03:49:03,172 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33109,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T03:49:03,173 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33109,DS-617819d4-916b-4576-9dac-8f869afd216e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T03:49:03,173 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/WALs/c85114ed5096,33229,1732247312946/c85114ed5096%2C33229%2C1732247312946.1732247313484 2024-11-22T03:49:03,173 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43009:43009),(127.0.0.1/127.0.0.1:40797:40797)] 2024-11-22T03:49:03,173 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/WALs/c85114ed5096,33229,1732247312946/c85114ed5096%2C33229%2C1732247312946.1732247313484 is not closed yet, will try archiving it next time 2024-11-22T03:49:03,173 WARN [IPC Server handler 3 on default port 33141 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/WALs/c85114ed5096,33229,1732247312946/c85114ed5096%2C33229%2C1732247312946.1732247313484 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-22T03:49:03,173 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/WALs/c85114ed5096,33229,1732247312946/c85114ed5096%2C33229%2C1732247312946.1732247313484 after 0ms 2024-11-22T03:49:03,188 DEBUG [M:0;c85114ed5096:33229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c1bcff32315e4bd6883cfa77a7f288aa is 82, key is hbase:meta,,1/info:regioninfo/1732247314200/Put/seqid=0 2024-11-22T03:49:03,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38737 is added to blk_1073741848_1033 (size=5672) 2024-11-22T03:49:03,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_1073741848_1033 (size=5672) 2024-11-22T03:49:03,193 INFO [M:0;c85114ed5096:33229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c1bcff32315e4bd6883cfa77a7f288aa 2024-11-22T03:49:03,213 DEBUG [M:0;c85114ed5096:33229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/859cfecaceb7422eb0f0a84c98a1b140 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732247314716/Put/seqid=0 2024-11-22T03:49:03,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_1073741849_1034 (size=6117) 2024-11-22T03:49:03,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38737 is added to blk_1073741849_1034 (size=6117) 2024-11-22T03:49:03,218 INFO [M:0;c85114ed5096:33229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/859cfecaceb7422eb0f0a84c98a1b140 2024-11-22T03:49:03,238 DEBUG [M:0;c85114ed5096:33229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/eb9d53534cc74bd6a5ac09178718bc45 is 69, key is c85114ed5096,38975,1732247312988/rs:state/1732247313630/Put/seqid=0 2024-11-22T03:49:03,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38737 is added to blk_1073741850_1035 (size=5156) 2024-11-22T03:49:03,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_1073741850_1035 (size=5156) 2024-11-22T03:49:03,242 INFO [M:0;c85114ed5096:33229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/eb9d53534cc74bd6a5ac09178718bc45 2024-11-22T03:49:03,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:49:03,257 INFO [RS:0;c85114ed5096:38975 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:49:03,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38975-0x100658bb6930001, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:49:03,257 INFO [RS:0;c85114ed5096:38975 {}] regionserver.HRegionServer(1031): Exiting; stopping=c85114ed5096,38975,1732247312988; zookeeper connection closed. 
2024-11-22T03:49:03,257 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@edf9123 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@edf9123 2024-11-22T03:49:03,257 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T03:49:03,260 DEBUG [M:0;c85114ed5096:33229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fb7272d2db6f4b6e8ad4d4a85cb71a44 is 52, key is load_balancer_on/state:d/1732247314330/Put/seqid=0 2024-11-22T03:49:03,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38737 is added to blk_1073741851_1036 (size=5056) 2024-11-22T03:49:03,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_1073741851_1036 (size=5056) 2024-11-22T03:49:03,265 INFO [M:0;c85114ed5096:33229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fb7272d2db6f4b6e8ad4d4a85cb71a44 2024-11-22T03:49:03,271 DEBUG [M:0;c85114ed5096:33229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c1bcff32315e4bd6883cfa77a7f288aa as hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c1bcff32315e4bd6883cfa77a7f288aa 2024-11-22T03:49:03,276 INFO [M:0;c85114ed5096:33229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c1bcff32315e4bd6883cfa77a7f288aa, entries=8, sequenceid=56, filesize=5.5 K 2024-11-22T03:49:03,277 DEBUG [M:0;c85114ed5096:33229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/859cfecaceb7422eb0f0a84c98a1b140 as hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/859cfecaceb7422eb0f0a84c98a1b140 2024-11-22T03:49:03,282 INFO [M:0;c85114ed5096:33229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/859cfecaceb7422eb0f0a84c98a1b140, entries=6, sequenceid=56, filesize=6.0 K 2024-11-22T03:49:03,283 DEBUG [M:0;c85114ed5096:33229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/eb9d53534cc74bd6a5ac09178718bc45 as hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/eb9d53534cc74bd6a5ac09178718bc45 
2024-11-22T03:49:03,289 INFO [M:0;c85114ed5096:33229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/eb9d53534cc74bd6a5ac09178718bc45, entries=1, sequenceid=56, filesize=5.0 K 2024-11-22T03:49:03,290 DEBUG [M:0;c85114ed5096:33229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fb7272d2db6f4b6e8ad4d4a85cb71a44 as hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fb7272d2db6f4b6e8ad4d4a85cb71a44 2024-11-22T03:49:03,297 INFO [M:0;c85114ed5096:33229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fb7272d2db6f4b6e8ad4d4a85cb71a44, entries=1, sequenceid=56, filesize=4.9 K 2024-11-22T03:49:03,298 INFO [M:0;c85114ed5096:33229 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=56, compaction requested=false 2024-11-22T03:49:03,300 INFO [M:0;c85114ed5096:33229 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:49:03,300 DEBUG [M:0;c85114ed5096:33229 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732247343166Disabling compacts and flushes for region at 1732247343166Disabling writes for close at 1732247343166Obtaining lock to block concurrent updates at 1732247343166Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732247343166Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1732247343166Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732247343174 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732247343174Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732247343187 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732247343187Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732247343198 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732247343213 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732247343213Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732247343222 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732247343237 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732247343237Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732247343247 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732247343259 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732247343260 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6cfcf8ca: reopening flushed file at 1732247343270 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4343f2e4: reopening flushed file at 1732247343276 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c65554e: reopening flushed file at 1732247343282 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3985da12: reopening flushed file at 1732247343289 (+7 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=56, compaction requested=false at 1732247343298 (+9 ms)Writing region close event to WAL at 1732247343300 (+2 ms)Closed at 1732247343300 2024-11-22T03:49:03,301 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:03,302 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:03,302 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:03,302 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:03,302 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:03,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38737 is added to blk_1073741847_1031 (size=757) 2024-11-22T03:49:03,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_1073741847_1031 (size=757) 2024-11-22T03:49:03,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:03,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:04,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,248 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,276 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,276 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,276 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,286 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,294 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,294 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,798 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:49:04,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:04,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:04,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,831 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,831 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,831 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:04,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:05,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:05,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:06,080 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-22T03:49:06,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:06,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:07,175 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/WALs/c85114ed5096,33229,1732247312946/c85114ed5096%2C33229%2C1732247312946.1732247313484 after 4002ms 2024-11-22T03:49:07,176 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/WALs/c85114ed5096,33229,1732247312946/c85114ed5096%2C33229%2C1732247312946.1732247313484 to hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/oldWALs/c85114ed5096%2C33229%2C1732247312946.1732247313484 2024-11-22T03:49:07,183 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/MasterData/oldWALs/c85114ed5096%2C33229%2C1732247312946.1732247313484 to hdfs://localhost:33141/user/jenkins/test-data/e9330b3d-5138-4b90-49b4-370f16080394/oldWALs/c85114ed5096%2C33229%2C1732247312946.1732247313484$masterlocalwal$ 2024-11-22T03:49:07,184 INFO [M:0;c85114ed5096:33229 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T03:49:07,184 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T03:49:07,184 INFO [M:0;c85114ed5096:33229 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33229 2024-11-22T03:49:07,184 INFO [M:0;c85114ed5096:33229 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:49:07,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:49:07,287 INFO [M:0;c85114ed5096:33229 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:49:07,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33229-0x100658bb6930000, quorum=127.0.0.1:53951, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:49:07,293 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14081bd9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:49:07,294 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e7f9298{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:49:07,294 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:49:07,294 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68208c52{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:49:07,294 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5778aad8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir/,STOPPED} 2024-11-22T03:49:07,296 WARN [BP-894353976-172.17.0.2-1732247312367 heartbeating to localhost/127.0.0.1:33141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:49:07,296 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:49:07,296 WARN [BP-894353976-172.17.0.2-1732247312367 heartbeating to localhost/127.0.0.1:33141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-894353976-172.17.0.2-1732247312367 (Datanode Uuid 8f5b92ec-9a64-4f50-bf34-61b6438278c4) service to localhost/127.0.0.1:33141 2024-11-22T03:49:07,296 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:49:07,297 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data3/current/BP-894353976-172.17.0.2-1732247312367 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:49:07,297 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data4/current/BP-894353976-172.17.0.2-1732247312367 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:49:07,297 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:49:07,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@710628af{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:49:07,300 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@797b72a2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:49:07,300 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:49:07,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37f01e59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:49:07,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26a107ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir/,STOPPED} 2024-11-22T03:49:07,302 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:49:07,302 WARN [BP-894353976-172.17.0.2-1732247312367 heartbeating to localhost/127.0.0.1:33141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:49:07,302 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:49:07,302 WARN [BP-894353976-172.17.0.2-1732247312367 heartbeating to localhost/127.0.0.1:33141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-894353976-172.17.0.2-1732247312367 (Datanode Uuid 9db1947b-5ca6-458d-a2e7-c7d00008effd) service to localhost/127.0.0.1:33141 2024-11-22T03:49:07,303 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data1/current/BP-894353976-172.17.0.2-1732247312367 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:49:07,303 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/cluster_1833d8fb-dda1-899b-b10f-510f7cc75d7f/data/data2/current/BP-894353976-172.17.0.2-1732247312367 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:49:07,303 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:49:07,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@584b55a0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:49:07,310 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1afc855{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:49:07,310 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:49:07,311 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e21b500{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:49:07,311 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@420b80b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir/,STOPPED} 2024-11-22T03:49:07,318 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T03:49:07,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T03:49:07,346 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:33141 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:33141 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33141 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:33141 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:33141 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 448) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=73 (was 81), ProcessCount=11 (was 11), AvailableMemoryMB=2703 (was 2929) 2024-11-22T03:49:07,353 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=73, ProcessCount=11, AvailableMemoryMB=2702 2024-11-22T03:49:07,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T03:49:07,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.log.dir so I do NOT create it in target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c 2024-11-22T03:49:07,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cb4c6c09-6b33-cfc6-a28e-88e68e4dcb75/hadoop.tmp.dir so I do NOT create it in target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c 2024-11-22T03:49:07,353 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/cluster_2a059e11-2e62-2404-f3f0-30e680e8de61, deleteOnExit=true 2024-11-22T03:49:07,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T03:49:07,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/test.cache.data in system properties and HBase conf 2024-11-22T03:49:07,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T03:49:07,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/hadoop.log.dir in system properties and HBase conf 2024-11-22T03:49:07,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T03:49:07,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T03:49:07,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T03:49:07,354 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-22T03:49:07,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:49:07,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:49:07,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T03:49:07,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:49:07,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T03:49:07,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T03:49:07,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:49:07,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:49:07,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T03:49:07,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/nfs.dump.dir in system properties and HBase conf 2024-11-22T03:49:07,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/java.io.tmpdir in system properties and HBase conf 2024-11-22T03:49:07,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:49:07,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T03:49:07,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T03:49:07,367 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:49:07,411 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:49:07,415 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:49:07,416 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:49:07,416 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:49:07,416 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:49:07,416 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:49:07,417 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58d3fd6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:49:07,417 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45dd2cc3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:49:07,509 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f9b07ee{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/java.io.tmpdir/jetty-localhost-43327-hadoop-hdfs-3_4_1-tests_jar-_-any-9718182413834736941/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:49:07,510 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70854f76{HTTP/1.1, (http/1.1)}{localhost:43327} 2024-11-22T03:49:07,510 INFO [Time-limited test {}] server.Server(415): Started @184836ms 2024-11-22T03:49:07,521 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:49:07,556 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:49:07,559 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:49:07,560 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:49:07,560 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:49:07,560 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:49:07,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7114135a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:49:07,561 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b768886{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:49:07,654 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e4472fe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/java.io.tmpdir/jetty-localhost-43885-hadoop-hdfs-3_4_1-tests_jar-_-any-15658936491989543310/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:49:07,654 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@675e37f{HTTP/1.1, (http/1.1)}{localhost:43885} 2024-11-22T03:49:07,654 INFO [Time-limited test {}] server.Server(415): Started @184980ms 2024-11-22T03:49:07,655 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:49:07,679 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:49:07,681 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:49:07,682 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:49:07,682 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:49:07,682 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:49:07,682 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b50defd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:49:07,683 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fb6de9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:49:07,711 WARN [Thread-1638 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/cluster_2a059e11-2e62-2404-f3f0-30e680e8de61/data/data2/current/BP-1594657804-172.17.0.2-1732247347376/current, will proceed with Du for space computation calculation, 2024-11-22T03:49:07,711 WARN [Thread-1637 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/cluster_2a059e11-2e62-2404-f3f0-30e680e8de61/data/data1/current/BP-1594657804-172.17.0.2-1732247347376/current, will proceed with Du for space computation calculation, 2024-11-22T03:49:07,725 WARN [Thread-1616 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:49:07,727 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8736fe2cbec27b9a with lease ID 0x9bececb3d55a4e1a: Processing first storage report for DS-892b156a-1441-4a62-943c-dd12f75b09f3 from datanode DatanodeRegistration(127.0.0.1:35279, datanodeUuid=d8fe8db0-6443-4773-af71-5f5054a486e4, infoPort=46755, infoSecurePort=0, ipcPort=39027, storageInfo=lv=-57;cid=testClusterID;nsid=909238595;c=1732247347376) 2024-11-22T03:49:07,727 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8736fe2cbec27b9a with lease ID 0x9bececb3d55a4e1a: from storage DS-892b156a-1441-4a62-943c-dd12f75b09f3 node DatanodeRegistration(127.0.0.1:35279, datanodeUuid=d8fe8db0-6443-4773-af71-5f5054a486e4, infoPort=46755, infoSecurePort=0, ipcPort=39027, storageInfo=lv=-57;cid=testClusterID;nsid=909238595;c=1732247347376), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:49:07,728 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8736fe2cbec27b9a with lease ID 0x9bececb3d55a4e1a: Processing first storage report for DS-4ef3b0aa-7349-4d00-b569-ebc3bfd75959 from datanode DatanodeRegistration(127.0.0.1:35279, datanodeUuid=d8fe8db0-6443-4773-af71-5f5054a486e4, infoPort=46755, infoSecurePort=0, ipcPort=39027, storageInfo=lv=-57;cid=testClusterID;nsid=909238595;c=1732247347376) 2024-11-22T03:49:07,728 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8736fe2cbec27b9a with lease ID 0x9bececb3d55a4e1a: from storage DS-4ef3b0aa-7349-4d00-b569-ebc3bfd75959 node DatanodeRegistration(127.0.0.1:35279, datanodeUuid=d8fe8db0-6443-4773-af71-5f5054a486e4, infoPort=46755, infoSecurePort=0, ipcPort=39027, storageInfo=lv=-57;cid=testClusterID;nsid=909238595;c=1732247347376), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:49:07,792 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c1d88de{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/java.io.tmpdir/jetty-localhost-38029-hadoop-hdfs-3_4_1-tests_jar-_-any-2438249279333791865/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:49:07,793 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58d2b6e{HTTP/1.1, (http/1.1)}{localhost:38029} 2024-11-22T03:49:07,793 INFO [Time-limited test {}] server.Server(415): Started @185119ms 2024-11-22T03:49:07,794 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:49:07,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:07,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:07,855 WARN [Thread-1663 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/cluster_2a059e11-2e62-2404-f3f0-30e680e8de61/data/data3/current/BP-1594657804-172.17.0.2-1732247347376/current, will proceed with Du for space computation calculation, 2024-11-22T03:49:07,855 WARN [Thread-1664 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/cluster_2a059e11-2e62-2404-f3f0-30e680e8de61/data/data4/current/BP-1594657804-172.17.0.2-1732247347376/current, will proceed with Du for space computation calculation, 2024-11-22T03:49:07,869 WARN [Thread-1652 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:49:07,871 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbaed48ec0d6a145b with lease ID 0x9bececb3d55a4e1b: Processing first storage report for DS-c15a100d-e502-4ccb-8b4f-4be41b9ce4cc from datanode DatanodeRegistration(127.0.0.1:37243, datanodeUuid=e1791a5b-d903-43c6-a771-334c0fcb5c05, infoPort=44711, infoSecurePort=0, ipcPort=33025, storageInfo=lv=-57;cid=testClusterID;nsid=909238595;c=1732247347376) 2024-11-22T03:49:07,871 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbaed48ec0d6a145b with lease ID 0x9bececb3d55a4e1b: from storage DS-c15a100d-e502-4ccb-8b4f-4be41b9ce4cc node DatanodeRegistration(127.0.0.1:37243, datanodeUuid=e1791a5b-d903-43c6-a771-334c0fcb5c05, infoPort=44711, infoSecurePort=0, ipcPort=33025, storageInfo=lv=-57;cid=testClusterID;nsid=909238595;c=1732247347376), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:49:07,871 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbaed48ec0d6a145b with lease ID 0x9bececb3d55a4e1b: Processing first storage report for DS-ebb67c90-c327-4192-9234-362e105dd200 from datanode DatanodeRegistration(127.0.0.1:37243, datanodeUuid=e1791a5b-d903-43c6-a771-334c0fcb5c05, infoPort=44711, infoSecurePort=0, ipcPort=33025, storageInfo=lv=-57;cid=testClusterID;nsid=909238595;c=1732247347376) 2024-11-22T03:49:07,871 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbaed48ec0d6a145b with lease ID 0x9bececb3d55a4e1b: from storage DS-ebb67c90-c327-4192-9234-362e105dd200 node DatanodeRegistration(127.0.0.1:37243, datanodeUuid=e1791a5b-d903-43c6-a771-334c0fcb5c05, infoPort=44711, infoSecurePort=0, ipcPort=33025, storageInfo=lv=-57;cid=testClusterID;nsid=909238595;c=1732247347376), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:49:07,936 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c 2024-11-22T03:49:07,938 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/cluster_2a059e11-2e62-2404-f3f0-30e680e8de61/zookeeper_0, clientPort=62808, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/cluster_2a059e11-2e62-2404-f3f0-30e680e8de61/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/cluster_2a059e11-2e62-2404-f3f0-30e680e8de61/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T03:49:07,939 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62808 2024-11-22T03:49:07,939 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:49:07,940 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:49:07,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:49:07,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:49:07,950 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202 with version=8 2024-11-22T03:49:07,950 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/hbase-staging 2024-11-22T03:49:07,952 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c85114ed5096:0 server-side Connection retries=45 2024-11-22T03:49:07,952 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:49:07,952 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:49:07,952 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:49:07,952 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:49:07,952 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:49:07,952 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T03:49:07,952 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:49:07,953 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34055 2024-11-22T03:49:07,955 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34055 connecting to ZooKeeper ensemble=127.0.0.1:62808 2024-11-22T03:49:07,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:340550x0, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:49:07,959 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34055-0x100658c3f500000 connected 2024-11-22T03:49:07,970 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:49:07,971 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:49:07,973 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:49:07,974 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202, hbase.cluster.distributed=false 2024-11-22T03:49:07,976 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:49:07,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34055 2024-11-22T03:49:07,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34055 2024-11-22T03:49:07,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34055 2024-11-22T03:49:07,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34055 2024-11-22T03:49:07,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34055 2024-11-22T03:49:07,994 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c85114ed5096:0 server-side Connection retries=45 2024-11-22T03:49:07,994 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:49:07,994 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:49:07,994 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:49:07,994 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:49:07,994 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:49:07,994 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:49:07,995 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:49:07,995 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42443 2024-11-22T03:49:07,996 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42443 connecting to ZooKeeper ensemble=127.0.0.1:62808 2024-11-22T03:49:07,997 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:49:07,998 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:49:08,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:424430x0, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:49:08,002 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:424430x0, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:49:08,002 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42443-0x100658c3f500001 connected 2024-11-22T03:49:08,002 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:49:08,003 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T03:49:08,004 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T03:49:08,005 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:49:08,005 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42443 2024-11-22T03:49:08,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42443 2024-11-22T03:49:08,009 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42443 2024-11-22T03:49:08,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42443 2024-11-22T03:49:08,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42443 2024-11-22T03:49:08,022 DEBUG [M:0;c85114ed5096:34055 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c85114ed5096:34055 2024-11-22T03:49:08,023 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c85114ed5096,34055,1732247347952 2024-11-22T03:49:08,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:49:08,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:49:08,024 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c85114ed5096,34055,1732247347952 2024-11-22T03:49:08,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T03:49:08,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:49:08,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:49:08,026 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:49:08,026 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c85114ed5096,34055,1732247347952 from backup master directory 2024-11-22T03:49:08,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c85114ed5096,34055,1732247347952 2024-11-22T03:49:08,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:49:08,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:49:08,027 WARN [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:49:08,027 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c85114ed5096,34055,1732247347952 2024-11-22T03:49:08,030 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/hbase.id] with ID: 6ffd3624-4619-4de1-977f-188c8cb85ecc 2024-11-22T03:49:08,030 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/.tmp/hbase.id 2024-11-22T03:49:08,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:49:08,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:49:08,037 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/.tmp/hbase.id]:[hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/hbase.id] 2024-11-22T03:49:08,047 INFO [master/c85114ed5096:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:49:08,047 INFO [master/c85114ed5096:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T03:49:08,048 INFO [master/c85114ed5096:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
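The FSUtils entries just above show how the newly active master publishes the cluster ID: the UUID is first written to a temporary file under .tmp and then moved onto the final hbase.id path, so a reader never sees a half-written ID file. Below is a minimal sketch of that write-then-rename pattern against the public Hadoop FileSystem API; the root-dir path and the writeClusterId helper are made up for illustration and are not taken from the HBase sources.

    import java.nio.charset.StandardCharsets;
    import java.util.UUID;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdSketch {
      // Hypothetical helper: write the id to <rootDir>/.tmp/hbase.id, then rename it to <rootDir>/hbase.id.
      static void writeClusterId(FileSystem fs, Path rootDir, String clusterId) throws Exception {
        Path tmp = new Path(new Path(rootDir, ".tmp"), "hbase.id");
        Path dst = new Path(rootDir, "hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {   // write to the temporary location first
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, dst)) {                             // publish by moving onto the final path
          throw new IllegalStateException("could not move " + tmp + " to " + dst);
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();               // picks up fs.defaultFS, e.g. an hdfs:// URI
        FileSystem fs = FileSystem.get(conf);
        writeClusterId(fs, new Path("/tmp/hbase-rootdir-demo"), UUID.randomUUID().toString());
      }
    }

The same two-step sequence is what the "Write the cluster ID file to a temporary location" and "Move the temporary cluster ID file to its target location" messages above record.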
2024-11-22T03:49:08,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:49:08,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:49:08,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:49:08,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:49:08,058 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:49:08,059 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T03:49:08,059 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:49:08,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:49:08,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:49:08,066 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store 2024-11-22T03:49:08,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:49:08,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:49:08,072 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:49:08,072 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:49:08,072 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:49:08,072 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:49:08,072 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:49:08,072 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:49:08,072 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
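The MasterRegion lines above spell out the schema of the local 'master:store' region: four column families (info, proc, rs, state), with 'info' kept in memory with 3 versions, 8 KB blocks, a ROWCOL bloom filter and ROW_INDEX_V1 block encoding, and the other three on defaults (1 version, 64 KB blocks, ROW bloom). That region is created internally, not through the admin API, but a rough sketch of declaring a comparable schema with the public HBase client builders follows; the table name 'demo:store' is hypothetical, and the calls are the HBase 2.x-era TableDescriptorBuilder/ColumnFamilyDescriptorBuilder methods, which may differ in detail from what this 4.0.0-alpha build does internally.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreSchemaSketch {
      public static void main(String[] args) {
        // 'info' mirrors the attributes logged above: 3 versions, in-memory, 8 KB blocks,
        // ROWCOL bloom filter, ROW_INDEX_V1 data block encoding.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build();

        // 'proc', 'rs' and 'state' stay on the defaults shown in the log.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.of("proc");
        ColumnFamilyDescriptor rs = ColumnFamilyDescriptorBuilder.of("rs");
        ColumnFamilyDescriptor state = ColumnFamilyDescriptorBuilder.of("state");

        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .setColumnFamily(rs)
            .setColumnFamily(state)
            .build();

        System.out.println(td);   // prints a descriptor string similar to the one logged above
      }
    }
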
2024-11-22T03:49:08,073 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732247348072Disabling compacts and flushes for region at 1732247348072Disabling writes for close at 1732247348072Writing region close event to WAL at 1732247348072Closed at 1732247348072 2024-11-22T03:49:08,073 WARN [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/.initializing 2024-11-22T03:49:08,073 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/WALs/c85114ed5096,34055,1732247347952 2024-11-22T03:49:08,076 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C34055%2C1732247347952, suffix=, logDir=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/WALs/c85114ed5096,34055,1732247347952, archiveDir=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/oldWALs, maxLogs=10 2024-11-22T03:49:08,076 INFO [master/c85114ed5096:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C34055%2C1732247347952.1732247348076 2024-11-22T03:49:08,080 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/WALs/c85114ed5096,34055,1732247347952/c85114ed5096%2C34055%2C1732247347952.1732247348076 2024-11-22T03:49:08,083 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44711:44711),(127.0.0.1/127.0.0.1:46755:46755)] 2024-11-22T03:49:08,086 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:49:08,086 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:49:08,086 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:49:08,086 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:49:08,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:49:08,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T03:49:08,088 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:49:08,089 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:49:08,089 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:49:08,090 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T03:49:08,090 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:49:08,090 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:49:08,090 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:49:08,091 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T03:49:08,091 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:49:08,091 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:49:08,092 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:49:08,093 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T03:49:08,093 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:49:08,093 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:49:08,093 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:49:08,094 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:49:08,094 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:49:08,095 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:49:08,095 DEBUG [master/c85114ed5096:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:49:08,096 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T03:49:08,098 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:49:08,100 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:49:08,100 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=749095, jitterRate=-0.04747721552848816}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T03:49:08,101 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732247348086Initializing all the Stores at 1732247348087 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247348087Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247348087Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247348087Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247348087Cleaning up temporary data from old regions at 1732247348095 (+8 ms)Region opened successfully at 1732247348101 (+6 ms) 2024-11-22T03:49:08,101 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T03:49:08,104 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49538007, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c85114ed5096/172.17.0.2:0 2024-11-22T03:49:08,105 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T03:49:08,105 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T03:49:08,105 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T03:49:08,105 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T03:49:08,105 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T03:49:08,106 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T03:49:08,106 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T03:49:08,108 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T03:49:08,108 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T03:49:08,109 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T03:49:08,109 INFO [master/c85114ed5096:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T03:49:08,110 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T03:49:08,111 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T03:49:08,111 INFO [master/c85114ed5096:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T03:49:08,112 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T03:49:08,112 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T03:49:08,113 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T03:49:08,114 DEBUG 
[master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T03:49:08,115 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T03:49:08,116 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T03:49:08,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:49:08,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:49:08,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:49:08,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:49:08,117 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c85114ed5096,34055,1732247347952, sessionid=0x100658c3f500000, setting cluster-up flag (Was=false) 2024-11-22T03:49:08,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:49:08,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:49:08,121 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T03:49:08,122 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c85114ed5096,34055,1732247347952 2024-11-22T03:49:08,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:49:08,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:49:08,128 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T03:49:08,129 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c85114ed5096,34055,1732247347952 2024-11-22T03:49:08,130 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T03:49:08,132 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T03:49:08,132 INFO [master/c85114ed5096:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T03:49:08,133 INFO [master/c85114ed5096:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T03:49:08,133 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c85114ed5096,34055,1732247347952 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T03:49:08,134 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:49:08,134 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:49:08,134 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:49:08,134 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:49:08,135 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c85114ed5096:0, corePoolSize=10, maxPoolSize=10 2024-11-22T03:49:08,135 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:49:08,135 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c85114ed5096:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:49:08,135 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c85114ed5096:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T03:49:08,138 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732247378138 2024-11-22T03:49:08,138 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T03:49:08,138 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:49:08,138 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T03:49:08,138 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T03:49:08,138 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T03:49:08,139 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T03:49:08,139 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T03:49:08,139 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T03:49:08,139 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-22T03:49:08,139 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T03:49:08,139 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T03:49:08,139 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T03:49:08,140 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:49:08,140 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T03:49:08,140 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T03:49:08,140 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T03:49:08,140 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247348140,5,FailOnTimeoutGroup] 2024-11-22T03:49:08,141 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247348140,5,FailOnTimeoutGroup] 2024-11-22T03:49:08,141 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,141 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T03:49:08,141 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,141 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:49:08,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:49:08,152 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T03:49:08,152 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202 2024-11-22T03:49:08,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:49:08,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:49:08,159 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:49:08,160 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:49:08,161 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:49:08,161 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:49:08,162 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:49:08,162 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:49:08,163 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:49:08,163 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:49:08,163 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:49:08,163 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:49:08,164 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:49:08,164 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:49:08,165 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:49:08,165 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:49:08,166 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:49:08,166 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:49:08,166 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:49:08,166 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:49:08,167 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740 2024-11-22T03:49:08,167 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740 2024-11-22T03:49:08,168 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:49:08,168 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:49:08,169 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:49:08,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:49:08,171 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:49:08,171 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769316, jitterRate=-0.021764248609542847}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:49:08,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732247348159Initializing all the Stores at 1732247348160 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247348160Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247348160Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247348160Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247348160Cleaning up temporary data from old regions at 1732247348168 (+8 ms)Region opened successfully at 1732247348172 (+4 ms) 2024-11-22T03:49:08,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:49:08,172 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:49:08,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:49:08,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:49:08,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:49:08,172 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:49:08,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732247348172Disabling compacts and flushes for region at 
1732247348172Disabling writes for close at 1732247348172Writing region close event to WAL at 1732247348172Closed at 1732247348172 2024-11-22T03:49:08,174 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:49:08,174 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T03:49:08,174 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T03:49:08,175 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:49:08,176 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T03:49:08,213 INFO [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(746): ClusterId : 6ffd3624-4619-4de1-977f-188c8cb85ecc 2024-11-22T03:49:08,213 DEBUG [RS:0;c85114ed5096:42443 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:49:08,214 DEBUG [RS:0;c85114ed5096:42443 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:49:08,215 DEBUG [RS:0;c85114ed5096:42443 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:49:08,216 DEBUG [RS:0;c85114ed5096:42443 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:49:08,216 DEBUG [RS:0;c85114ed5096:42443 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56f911a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c85114ed5096/172.17.0.2:0 2024-11-22T03:49:08,227 DEBUG [RS:0;c85114ed5096:42443 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c85114ed5096:42443 2024-11-22T03:49:08,227 INFO [RS:0;c85114ed5096:42443 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:49:08,227 INFO [RS:0;c85114ed5096:42443 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:49:08,227 DEBUG [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T03:49:08,228 INFO [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(2659): reportForDuty to master=c85114ed5096,34055,1732247347952 with port=42443, startcode=1732247347994 2024-11-22T03:49:08,228 DEBUG [RS:0;c85114ed5096:42443 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:49:08,230 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59513, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:49:08,230 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34055 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c85114ed5096,42443,1732247347994 2024-11-22T03:49:08,230 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34055 {}] master.ServerManager(517): Registering regionserver=c85114ed5096,42443,1732247347994 2024-11-22T03:49:08,232 DEBUG [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202 2024-11-22T03:49:08,232 DEBUG [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42755 2024-11-22T03:49:08,232 DEBUG [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:49:08,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:49:08,234 DEBUG [RS:0;c85114ed5096:42443 {}] zookeeper.ZKUtil(111): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c85114ed5096,42443,1732247347994 2024-11-22T03:49:08,234 WARN [RS:0;c85114ed5096:42443 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:49:08,234 INFO [RS:0;c85114ed5096:42443 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:49:08,234 DEBUG [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/WALs/c85114ed5096,42443,1732247347994 2024-11-22T03:49:08,235 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c85114ed5096,42443,1732247347994] 2024-11-22T03:49:08,238 INFO [RS:0;c85114ed5096:42443 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:49:08,239 INFO [RS:0;c85114ed5096:42443 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:49:08,239 INFO [RS:0;c85114ed5096:42443 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:49:08,239 INFO [RS:0;c85114ed5096:42443 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T03:49:08,240 INFO [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:49:08,240 INFO [RS:0;c85114ed5096:42443 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:49:08,240 INFO [RS:0;c85114ed5096:42443 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,240 DEBUG [RS:0;c85114ed5096:42443 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:49:08,241 DEBUG [RS:0;c85114ed5096:42443 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:49:08,241 DEBUG [RS:0;c85114ed5096:42443 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:49:08,241 DEBUG [RS:0;c85114ed5096:42443 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:49:08,241 DEBUG [RS:0;c85114ed5096:42443 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:49:08,241 DEBUG [RS:0;c85114ed5096:42443 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c85114ed5096:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:49:08,241 DEBUG [RS:0;c85114ed5096:42443 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:49:08,241 DEBUG [RS:0;c85114ed5096:42443 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:49:08,241 DEBUG [RS:0;c85114ed5096:42443 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:49:08,241 DEBUG [RS:0;c85114ed5096:42443 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:49:08,241 DEBUG [RS:0;c85114ed5096:42443 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:49:08,241 DEBUG [RS:0;c85114ed5096:42443 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:49:08,241 DEBUG [RS:0;c85114ed5096:42443 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:49:08,241 DEBUG [RS:0;c85114ed5096:42443 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:49:08,243 INFO [RS:0;c85114ed5096:42443 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-22T03:49:08,243 INFO [RS:0;c85114ed5096:42443 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,243 INFO [RS:0;c85114ed5096:42443 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,243 INFO [RS:0;c85114ed5096:42443 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,243 INFO [RS:0;c85114ed5096:42443 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,243 INFO [RS:0;c85114ed5096:42443 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,42443,1732247347994-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:49:08,259 INFO [RS:0;c85114ed5096:42443 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:49:08,259 INFO [RS:0;c85114ed5096:42443 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,42443,1732247347994-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,259 INFO [RS:0;c85114ed5096:42443 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,259 INFO [RS:0;c85114ed5096:42443 {}] regionserver.Replication(171): c85114ed5096,42443,1732247347994 started 2024-11-22T03:49:08,273 INFO [RS:0;c85114ed5096:42443 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,273 INFO [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(1482): Serving as c85114ed5096,42443,1732247347994, RpcServer on c85114ed5096/172.17.0.2:42443, sessionid=0x100658c3f500001 2024-11-22T03:49:08,273 DEBUG [RS:0;c85114ed5096:42443 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:49:08,273 DEBUG [RS:0;c85114ed5096:42443 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c85114ed5096,42443,1732247347994 2024-11-22T03:49:08,273 DEBUG [RS:0;c85114ed5096:42443 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,42443,1732247347994' 2024-11-22T03:49:08,273 DEBUG [RS:0;c85114ed5096:42443 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:49:08,274 DEBUG [RS:0;c85114ed5096:42443 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:49:08,274 DEBUG [RS:0;c85114ed5096:42443 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:49:08,274 DEBUG [RS:0;c85114ed5096:42443 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:49:08,274 DEBUG [RS:0;c85114ed5096:42443 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c85114ed5096,42443,1732247347994 2024-11-22T03:49:08,274 DEBUG [RS:0;c85114ed5096:42443 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,42443,1732247347994' 2024-11-22T03:49:08,274 DEBUG [RS:0;c85114ed5096:42443 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:49:08,274 DEBUG 
[RS:0;c85114ed5096:42443 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:49:08,275 DEBUG [RS:0;c85114ed5096:42443 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:49:08,275 INFO [RS:0;c85114ed5096:42443 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:49:08,275 INFO [RS:0;c85114ed5096:42443 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T03:49:08,326 WARN [c85114ed5096:34055 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T03:49:08,377 INFO [RS:0;c85114ed5096:42443 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C42443%2C1732247347994, suffix=, logDir=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/WALs/c85114ed5096,42443,1732247347994, archiveDir=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/oldWALs, maxLogs=32 2024-11-22T03:49:08,377 INFO [RS:0;c85114ed5096:42443 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C42443%2C1732247347994.1732247348377 2024-11-22T03:49:08,382 INFO [RS:0;c85114ed5096:42443 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/WALs/c85114ed5096,42443,1732247347994/c85114ed5096%2C42443%2C1732247347994.1732247348377 2024-11-22T03:49:08,388 DEBUG [RS:0;c85114ed5096:42443 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44711:44711),(127.0.0.1/127.0.0.1:46755:46755)] 2024-11-22T03:49:08,577 DEBUG [c85114ed5096:34055 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T03:49:08,577 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c85114ed5096,42443,1732247347994 2024-11-22T03:49:08,580 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c85114ed5096,42443,1732247347994, state=OPENING 2024-11-22T03:49:08,581 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T03:49:08,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:49:08,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:49:08,584 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:49:08,584 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:49:08,584 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:49:08,584 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c85114ed5096,42443,1732247347994}] 2024-11-22T03:49:08,739 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T03:49:08,742 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40001, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T03:49:08,749 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T03:49:08,749 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:49:08,749 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:49:08,749 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T03:49:08,749 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T03:49:08,751 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C42443%2C1732247347994.meta, suffix=.meta, logDir=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/WALs/c85114ed5096,42443,1732247347994, archiveDir=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/oldWALs, maxLogs=32 2024-11-22T03:49:08,752 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C42443%2C1732247347994.meta.1732247348752.meta 2024-11-22T03:49:08,758 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/WALs/c85114ed5096,42443,1732247347994/c85114ed5096%2C42443%2C1732247347994.meta.1732247348752.meta 2024-11-22T03:49:08,760 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46755:46755),(127.0.0.1/127.0.0.1:44711:44711)] 2024-11-22T03:49:08,761 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:49:08,761 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T03:49:08,761 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered 
coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T03:49:08,761 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-22T03:49:08,761 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T03:49:08,761 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:49:08,761 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T03:49:08,762 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T03:49:08,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:49:08,764 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:49:08,764 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:49:08,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:49:08,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:49:08,766 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:49:08,766 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:49:08,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:49:08,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:49:08,767 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:49:08,767 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:49:08,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:49:08,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:49:08,769 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:49:08,769 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:49:08,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:49:08,769 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:49:08,770 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740 2024-11-22T03:49:08,771 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740 2024-11-22T03:49:08,773 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:49:08,773 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:49:08,773 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:49:08,775 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:49:08,776 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705826, jitterRate=-0.10249708592891693}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:49:08,776 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T03:49:08,776 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732247348762Writing region info on filesystem at 1732247348762Initializing all the Stores at 1732247348763 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247348763Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'} at 1732247348763Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247348763Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247348763Cleaning up temporary data from old regions at 1732247348773 (+10 ms)Running coprocessor post-open hooks at 1732247348776 (+3 ms)Region opened successfully at 1732247348776 2024-11-22T03:49:08,777 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732247348738 2024-11-22T03:49:08,780 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T03:49:08,780 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T03:49:08,782 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c85114ed5096,42443,1732247347994 2024-11-22T03:49:08,783 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c85114ed5096,42443,1732247347994, state=OPEN 2024-11-22T03:49:08,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:49:08,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:49:08,786 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c85114ed5096,42443,1732247347994 2024-11-22T03:49:08,786 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:49:08,786 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:49:08,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T03:49:08,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c85114ed5096,42443,1732247347994 in 202 msec 2024-11-22T03:49:08,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, 
resume processing ppid=1 2024-11-22T03:49:08,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 616 msec 2024-11-22T03:49:08,795 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:49:08,795 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T03:49:08,796 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:49:08,797 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c85114ed5096,42443,1732247347994, seqNum=-1] 2024-11-22T03:49:08,797 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:49:08,798 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44027, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:49:08,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 672 msec 2024-11-22T03:49:08,805 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732247348805, completionTime=-1 2024-11-22T03:49:08,806 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T03:49:08,806 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T03:49:08,808 INFO [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T03:49:08,808 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732247408808 2024-11-22T03:49:08,808 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732247468808 2024-11-22T03:49:08,808 INFO [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T03:49:08,808 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,34055,1732247347952-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,808 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,34055,1732247347952-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,808 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,34055,1732247347952-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
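
[Editor's note, hedged] The split-policy line recorded at 03:49:08,776 reports ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705826, jitterRate=-0.10249708592891693}. That figure is consistent with taking the test's hbase.hregion.max.filesize of 786432 bytes and adding a jittered fraction of it; the arithmetic below is an assumption that happens to reproduce both desiredMaxFileSize values appearing in this log, not a quotation of HBase source.

    // Assumed jitter arithmetic; the numeric inputs are copied from the log lines, the formula is hypothesized.
    public class SplitSizeJitterCheck {
        public static void main(String[] args) {
            long maxFileSize = 786432L;                  // hbase.hregion.max.filesize used by this test
            double metaJitter = -0.10249708592891693;    // reported for hbase:meta at 03:49:08,776
            double tableJitter = 0.004548326134681702;   // reported for the test table at 03:49:09,308
            System.out.println(maxFileSize + (long) (maxFileSize * metaJitter));   // prints 705826
            System.out.println(maxFileSize + (long) (maxFileSize * tableJitter));  // prints 790008
        }
    }
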
2024-11-22T03:49:08,809 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c85114ed5096:34055, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,809 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,809 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,811 DEBUG [master/c85114ed5096:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T03:49:08,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:08,812 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.785sec 2024-11-22T03:49:08,812 INFO [master/c85114ed5096:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T03:49:08,812 INFO [master/c85114ed5096:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 
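
[Editor's note, hedged] The WARN at 03:49:08,812 ("Failed invocation for hdfs://localhost:43749/...") and its once-per-second repetitions later in this log appear to be left over from an earlier mini cluster in the same JVM: the WAL paths, the HDFS port and the server start timestamps (1732247268xxx) predate the cluster started at 1732247347xxx, and the root cause is "java.io.IOException: Filesystem closed", so the Close-WAL-Writer thread keeps retrying lease recovery against a DFSClient that has already been shut down. The stack trace shows the probe going through Method.invoke into DistributedFileSystem.isFileClosed; the snippet below is a simplified, hypothetical sketch of such a reflective probe, not the project's actual RecoverLeaseFSUtils code.

    // Hypothetical sketch of a reflective isFileClosed probe (simplified).
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class IsFileClosedProbe {
        // Returns true when the filesystem reports the file closed; returns false when the probe
        // itself fails (for example the "Filesystem closed" cause seen above), leaving the caller
        // to retry until its recovery timeout expires.
        static boolean probe(FileSystem fs, Path path) {
            try {
                Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
                return (Boolean) isFileClosed.invoke(fs, path);
            } catch (InvocationTargetException e) {
                return false;  // wrapped cause seen above: java.io.IOException: Filesystem closed
            } catch (ReflectiveOperationException e) {
                return false;  // isFileClosed not available on this FileSystem implementation
            }
        }
    }
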
2024-11-22T03:49:08,812 INFO [master/c85114ed5096:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T03:49:08,812 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T03:49:08,812 INFO [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T03:49:08,813 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,34055,1732247347952-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:49:08,813 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,34055,1732247347952-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T03:49:08,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:08,816 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T03:49:08,816 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T03:49:08,816 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,34055,1732247347952-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:49:08,914 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6677bb9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:49:08,914 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c85114ed5096,34055,-1 for getting cluster id 2024-11-22T03:49:08,914 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T03:49:08,918 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6ffd3624-4619-4de1-977f-188c8cb85ecc' 2024-11-22T03:49:08,919 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T03:49:08,919 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6ffd3624-4619-4de1-977f-188c8cb85ecc" 2024-11-22T03:49:08,919 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fffe208, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:49:08,919 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c85114ed5096,34055,-1] 2024-11-22T03:49:08,920 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T03:49:08,920 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:49:08,923 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48326, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T03:49:08,924 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a860b8d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:49:08,925 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:49:08,926 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c85114ed5096,42443,1732247347994, seqNum=-1] 2024-11-22T03:49:08,926 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-22T03:49:08,927 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40562, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:49:08,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c85114ed5096,34055,1732247347952 2024-11-22T03:49:08,930 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:49:08,932 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T03:49:08,932 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T03:49:08,933 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is c85114ed5096,34055,1732247347952 2024-11-22T03:49:08,934 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1e9bda83 2024-11-22T03:49:08,934 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T03:49:08,935 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48330, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T03:49:08,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T03:49:08,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
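
[Editor's note, hedged] The two TableDescriptorChecker warnings directly above show that this test runs with hbase.hregion.max.filesize=786432 and hbase.hregion.memstore.flush.size=8192, i.e. deliberately tiny limits so that flushes and log rolls happen quickly. A minimal sketch of such a setup follows; the two configuration keys and values are taken verbatim from the warnings, everything else is assumed test scaffolding.

    // Shrinking the limits that produce the MAX_FILESIZE / MEMSTORE_FLUSHSIZE warnings above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallRegionTestConfig {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.max.filesize", 786432L);       // triggers the MAX_FILESIZE warning
            conf.setLong("hbase.hregion.memstore.flush.size", 8192L);  // triggers the MEMSTORE_FLUSHSIZE warning
            return conf;
        }
    }
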
2024-11-22T03:49:08,936 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:49:08,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T03:49:08,939 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T03:49:08,939 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:49:08,939 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-22T03:49:08,940 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T03:49:08,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:49:08,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741835_1011 (size=405) 2024-11-22T03:49:08,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741835_1011 (size=405) 2024-11-22T03:49:08,948 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4edb224ab9398d1152ececb86060c00d, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202 2024-11-22T03:49:08,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741836_1012 (size=88) 2024-11-22T03:49:08,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37243 is added to blk_1073741836_1012 (size=88) 2024-11-22T03:49:08,955 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:49:08,955 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 4edb224ab9398d1152ececb86060c00d, disabling compactions & flushes 2024-11-22T03:49:08,955 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. 2024-11-22T03:49:08,955 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. 2024-11-22T03:49:08,955 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. after waiting 0 ms 2024-11-22T03:49:08,955 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. 2024-11-22T03:49:08,955 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. 2024-11-22T03:49:08,955 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4edb224ab9398d1152ececb86060c00d: Waiting for close lock at 1732247348955Disabling compacts and flushes for region at 1732247348955Disabling writes for close at 1732247348955Writing region close event to WAL at 1732247348955Closed at 1732247348955 2024-11-22T03:49:08,956 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T03:49:08,957 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732247348957"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732247348957"}]},"ts":"1732247348957"} 2024-11-22T03:49:08,959 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
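
[Editor's note, hedged] The create request logged at 03:49:08,936 (table TestLogRolling-testCompactionRecordDoesntBlockRolling with a single 'info' family) is what the master turns into CreateTableProcedure pid=4 above. The sketch below mirrors that request with the standard HBase client builders; only the table name and the family name come from the log, the rest (method shape, defaults) is assumed.

    // Client-side call shaped like the create request behind CreateTableProcedure pid=4.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTestTable {
        static void create(Admin admin) throws Exception {
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))  // other attributes left at defaults
                .build();
            admin.createTable(td);  // master stores pid=4 and assigns the region, as logged above
        }
    }
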
2024-11-22T03:49:08,960 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T03:49:08,961 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732247348960"}]},"ts":"1732247348960"} 2024-11-22T03:49:08,963 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-22T03:49:08,963 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4edb224ab9398d1152ececb86060c00d, ASSIGN}] 2024-11-22T03:49:08,965 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4edb224ab9398d1152ececb86060c00d, ASSIGN 2024-11-22T03:49:08,966 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4edb224ab9398d1152ececb86060c00d, ASSIGN; state=OFFLINE, location=c85114ed5096,42443,1732247347994; forceNewPlan=false, retain=false 2024-11-22T03:49:09,117 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4edb224ab9398d1152ececb86060c00d, regionState=OPENING, regionLocation=c85114ed5096,42443,1732247347994 2024-11-22T03:49:09,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4edb224ab9398d1152ececb86060c00d, ASSIGN because future has completed 2024-11-22T03:49:09,124 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4edb224ab9398d1152ececb86060c00d, server=c85114ed5096,42443,1732247347994}] 2024-11-22T03:49:09,289 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. 
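
[Editor's note, hedged] The ConnectionRegistry / ClusterIdFetcher exchanges logged between 03:49:08,914 and 03:49:08,935, followed by "Client=null/null set balanceSwitch=false", match the usual client bootstrap for a mini-cluster test: open a Connection, obtain an Admin stub from the master, and switch the balancer off. A minimal sketch under those assumptions (the configuration is assumed to carry the test cluster's ZooKeeper quorum):

    // Client bootstrap sketch; produces traffic of the kind logged above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientBootstrap {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();  // would pick up the mini-cluster's settings
            try (Connection conn = ConnectionFactory.createConnection(conf);  // fetches cluster id and meta location
                 Admin admin = conn.getAdmin()) {
                admin.balancerSwitch(false, true);  // corresponds to "set balanceSwitch=false" in the master log
            }
        }
    }
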
2024-11-22T03:49:09,290 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4edb224ab9398d1152ececb86060c00d, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:49:09,291 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 4edb224ab9398d1152ececb86060c00d 2024-11-22T03:49:09,291 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:49:09,291 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4edb224ab9398d1152ececb86060c00d 2024-11-22T03:49:09,291 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4edb224ab9398d1152ececb86060c00d 2024-11-22T03:49:09,294 INFO [StoreOpener-4edb224ab9398d1152ececb86060c00d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4edb224ab9398d1152ececb86060c00d 2024-11-22T03:49:09,297 INFO [StoreOpener-4edb224ab9398d1152ececb86060c00d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4edb224ab9398d1152ececb86060c00d columnFamilyName info 2024-11-22T03:49:09,297 DEBUG [StoreOpener-4edb224ab9398d1152ececb86060c00d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:49:09,298 INFO [StoreOpener-4edb224ab9398d1152ececb86060c00d-1 {}] regionserver.HStore(327): Store=4edb224ab9398d1152ececb86060c00d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:49:09,298 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4edb224ab9398d1152ececb86060c00d 2024-11-22T03:49:09,299 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d 2024-11-22T03:49:09,300 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d 2024-11-22T03:49:09,301 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4edb224ab9398d1152ececb86060c00d 2024-11-22T03:49:09,301 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4edb224ab9398d1152ececb86060c00d 2024-11-22T03:49:09,304 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4edb224ab9398d1152ececb86060c00d 2024-11-22T03:49:09,308 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:49:09,308 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4edb224ab9398d1152ececb86060c00d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=790008, jitterRate=0.004548326134681702}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T03:49:09,308 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4edb224ab9398d1152ececb86060c00d 2024-11-22T03:49:09,309 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4edb224ab9398d1152ececb86060c00d: Running coprocessor pre-open hook at 1732247349292Writing region info on filesystem at 1732247349292Initializing all the Stores at 1732247349294 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247349294Cleaning up temporary data from old regions at 1732247349301 (+7 ms)Running coprocessor post-open hooks at 1732247349308 (+7 ms)Region opened successfully at 1732247349309 (+1 ms) 2024-11-22T03:49:09,310 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d., pid=6, masterSystemTime=1732247349280 2024-11-22T03:49:09,313 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. 2024-11-22T03:49:09,313 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. 2024-11-22T03:49:09,314 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4edb224ab9398d1152ececb86060c00d, regionState=OPEN, openSeqNum=2, regionLocation=c85114ed5096,42443,1732247347994 2024-11-22T03:49:09,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4edb224ab9398d1152ececb86060c00d, server=c85114ed5096,42443,1732247347994 because future has completed 2024-11-22T03:49:09,322 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T03:49:09,322 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4edb224ab9398d1152ececb86060c00d, server=c85114ed5096,42443,1732247347994 in 195 msec 2024-11-22T03:49:09,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T03:49:09,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4edb224ab9398d1152ececb86060c00d, ASSIGN in 359 msec 2024-11-22T03:49:09,326 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T03:49:09,326 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732247349326"}]},"ts":"1732247349326"} 2024-11-22T03:49:09,328 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-22T03:49:09,329 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T03:49:09,331 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 393 msec 2024-11-22T03:49:09,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:09,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:10,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:10,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:11,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:11,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:12,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:12,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:13,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:13,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:14,253 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:49:14,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,288 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,292 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:49:14,297 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T03:49:14,297 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-22T03:49:14,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:14,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:15,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:15,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:16,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:16,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:17,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:17,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:18,749 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T03:49:18,749 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T03:49:18,751 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:49:18,751 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T03:49:18,752 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T03:49:18,752 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-22T03:49:18,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:18,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:18,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:49:18,988 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-22T03:49:18,989 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-22T03:49:18,996 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T03:49:18,996 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. 
2024-11-22T03:49:19,000 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d., hostname=c85114ed5096,42443,1732247347994, seqNum=2] 2024-11-22T03:49:19,008 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T03:49:19,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T03:49:19,014 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-22T03:49:19,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-22T03:49:19,015 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T03:49:19,016 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T03:49:19,177 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42443 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-22T03:49:19,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. 
2024-11-22T03:49:19,178 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 4edb224ab9398d1152ececb86060c00d 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-22T03:49:19,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/aed98cbbc9e44767bb8b05766219789b is 1080, key is row0001/info:/1732247359001/Put/seqid=0 2024-11-22T03:49:19,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741837_1013 (size=6033) 2024-11-22T03:49:19,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741837_1013 (size=6033) 2024-11-22T03:49:19,210 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/aed98cbbc9e44767bb8b05766219789b 2024-11-22T03:49:19,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/aed98cbbc9e44767bb8b05766219789b as hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/aed98cbbc9e44767bb8b05766219789b 2024-11-22T03:49:19,240 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/aed98cbbc9e44767bb8b05766219789b, entries=1, sequenceid=5, filesize=5.9 K 2024-11-22T03:49:19,241 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4edb224ab9398d1152ececb86060c00d in 63ms, sequenceid=5, compaction requested=false 2024-11-22T03:49:19,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 4edb224ab9398d1152ececb86060c00d: 2024-11-22T03:49:19,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. 
2024-11-22T03:49:19,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-22T03:49:19,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-22T03:49:19,250 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-22T03:49:19,250 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 231 msec 2024-11-22T03:49:19,252 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 242 msec 2024-11-22T03:49:19,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:19,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:20,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:20,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:21,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:21,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:22,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:22,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:23,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:23,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:24,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:24,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:25,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:25,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:26,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:26,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:27,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:27,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:28,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:28,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:29,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-22T03:49:29,108 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-22T03:49:29,116 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T03:49:29,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T03:49:29,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-22T03:49:29,120 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-22T03:49:29,122 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T03:49:29,122 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T03:49:29,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42443 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-22T03:49:29,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. 
2024-11-22T03:49:29,277 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 4edb224ab9398d1152ececb86060c00d 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-22T03:49:29,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/183b260669df427aa7350f2e6afdc579 is 1080, key is row0002/info:/1732247369112/Put/seqid=0 2024-11-22T03:49:29,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741838_1014 (size=6033) 2024-11-22T03:49:29,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741838_1014 (size=6033) 2024-11-22T03:49:29,294 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/183b260669df427aa7350f2e6afdc579 2024-11-22T03:49:29,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/183b260669df427aa7350f2e6afdc579 as hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/183b260669df427aa7350f2e6afdc579 2024-11-22T03:49:29,309 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/183b260669df427aa7350f2e6afdc579, entries=1, sequenceid=9, filesize=5.9 K 2024-11-22T03:49:29,310 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4edb224ab9398d1152ececb86060c00d in 33ms, sequenceid=9, compaction requested=false 2024-11-22T03:49:29,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 4edb224ab9398d1152ececb86060c00d: 2024-11-22T03:49:29,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. 
2024-11-22T03:49:29,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-22T03:49:29,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-22T03:49:29,314 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-22T03:49:29,314 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 190 msec 2024-11-22T03:49:29,316 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 199 msec 2024-11-22T03:49:29,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:29,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:30,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:30,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:31,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:31,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:32,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:32,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:33,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:33,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-22T03:49:33,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta after 68080ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor205.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T03:49:33,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 after 68089ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor205.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T03:49:34,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:34,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:35,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:35,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:36,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:36,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:37,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-22T03:49:37,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T03:49:37,936 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-22T03:49:38,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:38,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T03:49:39,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-22T03:49:39,137 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-22T03:49:39,142 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C42443%2C1732247347994.1732247379142
2024-11-22T03:49:39,151 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:49:39,152 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:49:39,152 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:49:39,152 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:49:39,152 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:49:39,152 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/WALs/c85114ed5096,42443,1732247347994/c85114ed5096%2C42443%2C1732247347994.1732247348377 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/WALs/c85114ed5096,42443,1732247347994/c85114ed5096%2C42443%2C1732247347994.1732247379142
2024-11-22T03:49:39,154 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46755:46755),(127.0.0.1/127.0.0.1:44711:44711)]
2024-11-22T03:49:39,154 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/WALs/c85114ed5096,42443,1732247347994/c85114ed5096%2C42443%2C1732247347994.1732247348377 is not closed yet, will try archiving it next time
2024-11-22T03:49:39,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741833_1009 (size=5546)
2024-11-22T03:49:39,155 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T03:49:39,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741833_1009 (size=5546)
2024-11-22T03:49:39,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T03:49:39,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-22T03:49:39,160 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-22T03:49:39,162 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-22T03:49:39,162 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-22T03:49:39,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42443 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-22T03:49:39,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.
2024-11-22T03:49:39,316 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 4edb224ab9398d1152ececb86060c00d 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-22T03:49:39,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/a2f67e18bff34e08b49aa299ed20f800 is 1080, key is row0003/info:/1732247379139/Put/seqid=0
2024-11-22T03:49:39,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741840_1016 (size=6033)
2024-11-22T03:49:39,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741840_1016 (size=6033)
2024-11-22T03:49:39,325 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/a2f67e18bff34e08b49aa299ed20f800
2024-11-22T03:49:39,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/a2f67e18bff34e08b49aa299ed20f800 as hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/a2f67e18bff34e08b49aa299ed20f800
2024-11-22T03:49:39,337 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/a2f67e18bff34e08b49aa299ed20f800, entries=1, sequenceid=13, filesize=5.9 K
2024-11-22T03:49:39,338 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4edb224ab9398d1152ececb86060c00d in 22ms, sequenceid=13, compaction requested=true
2024-11-22T03:49:39,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 4edb224ab9398d1152ececb86060c00d:
2024-11-22T03:49:39,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.
2024-11-22T03:49:39,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-22T03:49:39,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-22T03:49:39,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-22T03:49:39,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec
2024-11-22T03:49:39,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 188 msec
2024-11-22T03:49:39,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:39,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:40,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:40,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:41,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:41,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
2024-11-22T03:49:42,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
2024-11-22T03:49:42,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
2024-11-22T03:49:43,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
2024-11-22T03:49:43,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
2024-11-22T03:49:44,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
2024-11-22T03:49:44,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
2024-11-22T03:49:45,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
2024-11-22T03:49:45,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
2024-11-22T03:49:46,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
2024-11-22T03:49:46,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
2024-11-22T03:49:47,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
2024-11-22T03:49:47,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
2024-11-22T03:49:48,841 INFO [master/c85114ed5096:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-22T03:49:48,841 INFO [master/c85114ed5096:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-22T03:49:48,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
2024-11-22T03:49:48,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
2024-11-22T03:49:49,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-22T03:49:49,247 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-22T03:49:49,247 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-22T03:49:49,248 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-22T03:49:49,249 DEBUG [Time-limited test {}] regionserver.HStore(1541): 4edb224ab9398d1152ececb86060c00d/info is initiating minor compaction (all files)
2024-11-22T03:49:49,249 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-22T03:49:49,249 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-22T03:49:49,249 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 4edb224ab9398d1152ececb86060c00d/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.
2024-11-22T03:49:49,249 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/aed98cbbc9e44767bb8b05766219789b, hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/183b260669df427aa7350f2e6afdc579, hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/a2f67e18bff34e08b49aa299ed20f800] into tmpdir=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp, totalSize=17.7 K
2024-11-22T03:49:49,250 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting aed98cbbc9e44767bb8b05766219789b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732247359001
2024-11-22T03:49:49,250 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 183b260669df427aa7350f2e6afdc579, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732247369112
2024-11-22T03:49:49,251 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting a2f67e18bff34e08b49aa299ed20f800, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732247379139
2024-11-22T03:49:49,262 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 4edb224ab9398d1152ececb86060c00d#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-22T03:49:49,263 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/44e6d0996df54180994e4b212a54e64a is 1080, key is row0001/info:/1732247359001/Put/seqid=0
2024-11-22T03:49:49,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741841_1017 (size=8296)
2024-11-22T03:49:49,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741841_1017 (size=8296)
2024-11-22T03:49:49,275 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/44e6d0996df54180994e4b212a54e64a as hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/44e6d0996df54180994e4b212a54e64a
2024-11-22T03:49:49,281 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4edb224ab9398d1152ececb86060c00d/info of 4edb224ab9398d1152ececb86060c00d into 44e6d0996df54180994e4b212a54e64a(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T03:49:49,281 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 4edb224ab9398d1152ececb86060c00d:
2024-11-22T03:49:49,284 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C42443%2C1732247347994.1732247389284
2024-11-22T03:49:49,290 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:49:49,290 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:49:49,290 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:49:49,290 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:49:49,291 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:49:49,291 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/WALs/c85114ed5096,42443,1732247347994/c85114ed5096%2C42443%2C1732247347994.1732247379142 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/WALs/c85114ed5096,42443,1732247347994/c85114ed5096%2C42443%2C1732247347994.1732247389284
2024-11-22T03:49:49,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741839_1015 (size=2520)
2024-11-22T03:49:49,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741839_1015 (size=2520)
2024-11-22T03:49:49,297 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46755:46755),(127.0.0.1/127.0.0.1:44711:44711)]
2024-11-22T03:49:49,297 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/WALs/c85114ed5096,42443,1732247347994/c85114ed5096%2C42443%2C1732247347994.1732247348377 to hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/oldWALs/c85114ed5096%2C42443%2C1732247347994.1732247348377
2024-11-22T03:49:49,298 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T03:49:49,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T03:49:49,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-22T03:49:49,301 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-22T03:49:49,302 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-22T03:49:49,302 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-22T03:49:49,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42443 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-22T03:49:49,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.
2024-11-22T03:49:49,455 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 4edb224ab9398d1152ececb86060c00d 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-22T03:49:49,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/34f652b58d544e6fb9c6a5e7af78a7a5 is 1080, key is row0000/info:/1732247389282/Put/seqid=0
2024-11-22T03:49:49,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741843_1019 (size=6033)
2024-11-22T03:49:49,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741843_1019 (size=6033)
2024-11-22T03:49:49,465 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/34f652b58d544e6fb9c6a5e7af78a7a5
2024-11-22T03:49:49,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/34f652b58d544e6fb9c6a5e7af78a7a5 as hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/34f652b58d544e6fb9c6a5e7af78a7a5
2024-11-22T03:49:49,476 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/34f652b58d544e6fb9c6a5e7af78a7a5, entries=1, sequenceid=18, filesize=5.9 K
2024-11-22T03:49:49,477 INFO [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4edb224ab9398d1152ececb86060c00d in 22ms, sequenceid=18, compaction requested=false
2024-11-22T03:49:49,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 4edb224ab9398d1152ececb86060c00d:
2024-11-22T03:49:49,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.
2024-11-22T03:49:49,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-22T03:49:49,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-22T03:49:49,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-22T03:49:49,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec
2024-11-22T03:49:49,485 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec
2024-11-22T03:49:49,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
2024-11-22T03:49:49,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
2024-11-22T03:49:50,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
2024-11-22T03:49:50,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
2024-11-22T03:49:51,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
2024-11-22T03:49:51,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:52,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:52,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:53,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:53,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:49:54,291 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 4edb224ab9398d1152ececb86060c00d, had cached 0 bytes from a total of 14329 2024-11-22T03:49:54,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:54,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:55,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:55,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:56,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:56,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:57,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:57,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:58,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:58,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-11-22T03:49:59,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-22T03:49:59,358 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-22T03:49:59,365 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C42443%2C1732247347994.1732247399365
2024-11-22T03:49:59,371 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:49:59,372 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:49:59,372 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:49:59,372 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:49:59,372 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:49:59,372 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/WALs/c85114ed5096,42443,1732247347994/c85114ed5096%2C42443%2C1732247347994.1732247389284 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/WALs/c85114ed5096,42443,1732247347994/c85114ed5096%2C42443%2C1732247347994.1732247399365
2024-11-22T03:49:59,373 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46755:46755),(127.0.0.1/127.0.0.1:44711:44711)]
2024-11-22T03:49:59,373 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/WALs/c85114ed5096,42443,1732247347994/c85114ed5096%2C42443%2C1732247347994.1732247389284 is not closed yet, will try archiving it next time
2024-11-22T03:49:59,373 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/WALs/c85114ed5096,42443,1732247347994/c85114ed5096%2C42443%2C1732247347994.1732247379142 to hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/oldWALs/c85114ed5096%2C42443%2C1732247347994.1732247379142
2024-11-22T03:49:59,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-22T03:49:59,373 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-22T03:49:59,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741842_1018 (size=2026)
2024-11-22T03:49:59,374 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-22T03:49:59,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T03:49:59,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T03:49:59,374 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-22T03:49:59,374 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-22T03:49:59,374 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1392482268, stopped=false
2024-11-22T03:49:59,374 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c85114ed5096,34055,1732247347952
2024-11-22T03:49:59,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741842_1018 (size=2026)
2024-11-22T03:49:59,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-22T03:49:59,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-22T03:49:59,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-22T03:49:59,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-22T03:49:59,376 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-22T03:49:59,376 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-22T03:49:59,376 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-22T03:49:59,376 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T03:49:59,376 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-22T03:49:59,376 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-22T03:49:59,377 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c85114ed5096,42443,1732247347994' *****
2024-11-22T03:49:59,377 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-22T03:49:59,377 INFO [RS:0;c85114ed5096:42443 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-22T03:49:59,377 INFO [RS:0;c85114ed5096:42443 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-22T03:49:59,377 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-22T03:49:59,377 INFO [RS:0;c85114ed5096:42443 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-22T03:49:59,377 INFO [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(3091): Received CLOSE for 4edb224ab9398d1152ececb86060c00d
2024-11-22T03:49:59,377 INFO [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(959): stopping server c85114ed5096,42443,1732247347994
2024-11-22T03:49:59,377 INFO [RS:0;c85114ed5096:42443 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-22T03:49:59,377 INFO [RS:0;c85114ed5096:42443 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c85114ed5096:42443.
2024-11-22T03:49:59,377 DEBUG [RS:0;c85114ed5096:42443 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-22T03:49:59,377 DEBUG [RS:0;c85114ed5096:42443 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T03:49:59,377 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4edb224ab9398d1152ececb86060c00d, disabling compactions & flushes
2024-11-22T03:49:59,378 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.
2024-11-22T03:49:59,378 INFO [RS:0;c85114ed5096:42443 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-22T03:49:59,378 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.
2024-11-22T03:49:59,378 INFO [RS:0;c85114ed5096:42443 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-22T03:49:59,378 INFO [RS:0;c85114ed5096:42443 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-22T03:49:59,378 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. after waiting 0 ms
2024-11-22T03:49:59,378 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.
2024-11-22T03:49:59,378 INFO [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-22T03:49:59,378 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 4edb224ab9398d1152ececb86060c00d 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-22T03:49:59,378 INFO [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-22T03:49:59,378 DEBUG [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(1325): Online Regions={4edb224ab9398d1152ececb86060c00d=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d., 1588230740=hbase:meta,,1.1588230740}
2024-11-22T03:49:59,378 DEBUG [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4edb224ab9398d1152ececb86060c00d
2024-11-22T03:49:59,378 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-22T03:49:59,378 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-22T03:49:59,378 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-22T03:49:59,378 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-22T03:49:59,378 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-22T03:49:59,378 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB
2024-11-22T03:49:59,382 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/3ea8f065677e43668c2ea71adf04fd6a is 1080, key is row0001/info:/1732247399362/Put/seqid=0
2024-11-22T03:49:59,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741845_1021 (size=6033)
2024-11-22T03:49:59,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741845_1021 (size=6033)
2024-11-22T03:49:59,388 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/3ea8f065677e43668c2ea71adf04fd6a
2024-11-22T03:49:59,394 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/.tmp/info/3ea8f065677e43668c2ea71adf04fd6a as hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/3ea8f065677e43668c2ea71adf04fd6a
2024-11-22T03:49:59,395 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/.tmp/info/acdccfa0831647f29bd87afc91347b2b is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d./info:regioninfo/1732247349314/Put/seqid=0
2024-11-22T03:49:59,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741846_1022 (size=7308)
2024-11-22T03:49:59,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741846_1022 (size=7308)
2024-11-22T03:49:59,400 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/.tmp/info/acdccfa0831647f29bd87afc91347b2b
2024-11-22T03:49:59,400 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/3ea8f065677e43668c2ea71adf04fd6a, entries=1, sequenceid=22, filesize=5.9 K
2024-11-22T03:49:59,401 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4edb224ab9398d1152ececb86060c00d in 23ms, sequenceid=22, compaction requested=true
2024-11-22T03:49:59,402 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/aed98cbbc9e44767bb8b05766219789b, hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/183b260669df427aa7350f2e6afdc579, hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/a2f67e18bff34e08b49aa299ed20f800] to archive
2024-11-22T03:49:59,402 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-11-22T03:49:59,404 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/aed98cbbc9e44767bb8b05766219789b to hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/aed98cbbc9e44767bb8b05766219789b
2024-11-22T03:49:59,405 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/183b260669df427aa7350f2e6afdc579 to hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/183b260669df427aa7350f2e6afdc579
2024-11-22T03:49:59,407 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/a2f67e18bff34e08b49aa299ed20f800 to hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/info/a2f67e18bff34e08b49aa299ed20f800
2024-11-22T03:49:59,407 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried.
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c85114ed5096:34055 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-22T03:49:59,407 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [aed98cbbc9e44767bb8b05766219789b=6033, 183b260669df427aa7350f2e6afdc579=6033, a2f67e18bff34e08b49aa299ed20f800=6033] 2024-11-22T03:49:59,410 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4edb224ab9398d1152ececb86060c00d/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-22T03:49:59,411 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. 2024-11-22T03:49:59,411 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4edb224ab9398d1152ececb86060c00d: Waiting for close lock at 1732247399377Running coprocessor pre-close hooks at 1732247399377Disabling compacts and flushes for region at 1732247399377Disabling writes for close at 1732247399378 (+1 ms)Obtaining lock to block concurrent updates at 1732247399378Preparing flush snapshotting stores in 4edb224ab9398d1152ececb86060c00d at 1732247399378Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732247399378Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. at 1732247399379 (+1 ms)Flushing 4edb224ab9398d1152ececb86060c00d/info: creating writer at 1732247399379Flushing 4edb224ab9398d1152ececb86060c00d/info: appending metadata at 1732247399381 (+2 ms)Flushing 4edb224ab9398d1152ececb86060c00d/info: closing flushed file at 1732247399381Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@af88529: reopening flushed file at 1732247399393 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4edb224ab9398d1152ececb86060c00d in 23ms, sequenceid=22, compaction requested=true at 1732247399401 (+8 ms)Writing region close event to WAL at 1732247399408 (+7 ms)Running coprocessor post-close hooks at 1732247399411 (+3 ms)Closed at 1732247399411 2024-11-22T03:49:59,411 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732247348935.4edb224ab9398d1152ececb86060c00d. 
2024-11-22T03:49:59,426 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/.tmp/ns/b5ab588e07c1471d82068d0d1655d675 is 43, key is default/ns:d/1732247348799/Put/seqid=0 2024-11-22T03:49:59,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741847_1023 (size=5153) 2024-11-22T03:49:59,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741847_1023 (size=5153) 2024-11-22T03:49:59,430 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/.tmp/ns/b5ab588e07c1471d82068d0d1655d675 2024-11-22T03:49:59,448 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/.tmp/table/8486787bcd4e4576a6c0af7192f670b7 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732247349326/Put/seqid=0 2024-11-22T03:49:59,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741848_1024 (size=5508) 2024-11-22T03:49:59,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741848_1024 (size=5508) 2024-11-22T03:49:59,453 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/.tmp/table/8486787bcd4e4576a6c0af7192f670b7 2024-11-22T03:49:59,458 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/.tmp/info/acdccfa0831647f29bd87afc91347b2b as hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/info/acdccfa0831647f29bd87afc91347b2b 2024-11-22T03:49:59,464 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/info/acdccfa0831647f29bd87afc91347b2b, entries=10, sequenceid=11, filesize=7.1 K 2024-11-22T03:49:59,465 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/.tmp/ns/b5ab588e07c1471d82068d0d1655d675 as hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/ns/b5ab588e07c1471d82068d0d1655d675 2024-11-22T03:49:59,471 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/ns/b5ab588e07c1471d82068d0d1655d675, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T03:49:59,472 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/.tmp/table/8486787bcd4e4576a6c0af7192f670b7 as hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/table/8486787bcd4e4576a6c0af7192f670b7 2024-11-22T03:49:59,478 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/table/8486787bcd4e4576a6c0af7192f670b7, entries=2, sequenceid=11, filesize=5.4 K 2024-11-22T03:49:59,479 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 101ms, sequenceid=11, compaction requested=false 2024-11-22T03:49:59,484 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T03:49:59,484 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:49:59,484 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:49:59,484 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732247399378Running coprocessor pre-close hooks at 1732247399378Disabling compacts and flushes for region at 1732247399378Disabling writes for close at 1732247399378Obtaining lock to block concurrent updates at 1732247399378Preparing flush snapshotting stores in 1588230740 at 1732247399378Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732247399378Flushing stores of hbase:meta,,1.1588230740 at 1732247399379 (+1 ms)Flushing 1588230740/info: creating writer at 1732247399379Flushing 1588230740/info: appending metadata at 1732247399394 (+15 ms)Flushing 1588230740/info: closing flushed file at 1732247399394Flushing 1588230740/ns: creating writer at 1732247399407 (+13 ms)Flushing 1588230740/ns: appending metadata at 1732247399425 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1732247399425Flushing 1588230740/table: creating writer at 1732247399435 (+10 ms)Flushing 1588230740/table: appending metadata at 1732247399447 (+12 ms)Flushing 1588230740/table: closing flushed file at 1732247399447Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c39d5c9: reopening flushed file at 1732247399457 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1cb4006d: reopening flushed file at 1732247399464 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@583a51c2: reopening flushed file at 1732247399471 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 101ms, sequenceid=11, compaction requested=false at 1732247399479 (+8 ms)Writing region close event to WAL at 1732247399480 (+1 ms)Running coprocessor post-close hooks at 1732247399484 (+4 ms)Closed at 1732247399484 2024-11-22T03:49:59,484 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T03:49:59,578 INFO [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(976): stopping server c85114ed5096,42443,1732247347994; all regions closed. 2024-11-22T03:49:59,579 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:59,579 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:59,579 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:59,579 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:59,580 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:59,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741834_1010 (size=3306) 2024-11-22T03:49:59,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741834_1010 (size=3306) 2024-11-22T03:49:59,589 DEBUG [RS:0;c85114ed5096:42443 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/oldWALs 2024-11-22T03:49:59,589 INFO [RS:0;c85114ed5096:42443 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c85114ed5096%2C42443%2C1732247347994.meta:.meta(num 1732247348752) 2024-11-22T03:49:59,590 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:59,590 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:59,590 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:59,591 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:59,591 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:59,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741844_1020 (size=1252) 2024-11-22T03:49:59,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741844_1020 (size=1252) 2024-11-22T03:49:59,597 DEBUG [RS:0;c85114ed5096:42443 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/oldWALs 2024-11-22T03:49:59,597 INFO [RS:0;c85114ed5096:42443 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c85114ed5096%2C42443%2C1732247347994:(num 1732247399365) 2024-11-22T03:49:59,597 DEBUG [RS:0;c85114ed5096:42443 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:49:59,597 INFO [RS:0;c85114ed5096:42443 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:49:59,597 INFO [RS:0;c85114ed5096:42443 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:49:59,597 INFO [RS:0;c85114ed5096:42443 {}] hbase.ChoreService(370): Chore service for: regionserver/c85114ed5096:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T03:49:59,597 INFO [RS:0;c85114ed5096:42443 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:49:59,597 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T03:49:59,598 INFO [RS:0;c85114ed5096:42443 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42443 2024-11-22T03:49:59,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c85114ed5096,42443,1732247347994 2024-11-22T03:49:59,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:49:59,599 INFO [RS:0;c85114ed5096:42443 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:49:59,600 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c85114ed5096,42443,1732247347994] 2024-11-22T03:49:59,601 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c85114ed5096,42443,1732247347994 already deleted, retry=false 2024-11-22T03:49:59,601 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c85114ed5096,42443,1732247347994 expired; onlineServers=0 2024-11-22T03:49:59,601 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c85114ed5096,34055,1732247347952' ***** 2024-11-22T03:49:59,601 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T03:49:59,601 INFO [M:0;c85114ed5096:34055 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:49:59,601 INFO [M:0;c85114ed5096:34055 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:49:59,601 DEBUG [M:0;c85114ed5096:34055 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T03:49:59,601 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T03:49:59,601 DEBUG [M:0;c85114ed5096:34055 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T03:49:59,601 DEBUG [master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247348140 {}] cleaner.HFileCleaner(306): Exit Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247348140,5,FailOnTimeoutGroup] 2024-11-22T03:49:59,601 DEBUG [master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247348140 {}] cleaner.HFileCleaner(306): Exit Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247348140,5,FailOnTimeoutGroup] 2024-11-22T03:49:59,601 INFO [M:0;c85114ed5096:34055 {}] hbase.ChoreService(370): Chore service for: master/c85114ed5096:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T03:49:59,601 INFO [M:0;c85114ed5096:34055 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:49:59,602 DEBUG [M:0;c85114ed5096:34055 {}] master.HMaster(1795): Stopping service threads 2024-11-22T03:49:59,602 INFO [M:0;c85114ed5096:34055 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T03:49:59,602 INFO [M:0;c85114ed5096:34055 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:49:59,602 INFO [M:0;c85114ed5096:34055 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T03:49:59,602 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T03:49:59,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T03:49:59,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:49:59,602 DEBUG [M:0;c85114ed5096:34055 {}] zookeeper.ZKUtil(347): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T03:49:59,602 WARN [M:0;c85114ed5096:34055 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T03:49:59,603 INFO [M:0;c85114ed5096:34055 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/.lastflushedseqids 2024-11-22T03:49:59,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741849_1025 (size=130) 2024-11-22T03:49:59,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741849_1025 (size=130) 2024-11-22T03:49:59,608 INFO [M:0;c85114ed5096:34055 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T03:49:59,608 INFO [M:0;c85114ed5096:34055 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T03:49:59,609 DEBUG [M:0;c85114ed5096:34055 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:49:59,609 INFO [M:0;c85114ed5096:34055 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:49:59,609 DEBUG [M:0;c85114ed5096:34055 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:49:59,609 DEBUG [M:0;c85114ed5096:34055 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:49:59,609 DEBUG [M:0;c85114ed5096:34055 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:49:59,609 INFO [M:0;c85114ed5096:34055 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.57 KB heapSize=54.98 KB 2024-11-22T03:49:59,622 DEBUG [M:0;c85114ed5096:34055 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f496223b1e2b4f459a4d3a30b448fe0d is 82, key is hbase:meta,,1/info:regioninfo/1732247348781/Put/seqid=0 2024-11-22T03:49:59,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741850_1026 (size=5672) 2024-11-22T03:49:59,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741850_1026 (size=5672) 2024-11-22T03:49:59,627 INFO [M:0;c85114ed5096:34055 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f496223b1e2b4f459a4d3a30b448fe0d 2024-11-22T03:49:59,646 DEBUG [M:0;c85114ed5096:34055 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/10e7ee7a3cf149be958c66799f1c8596 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732247349330/Put/seqid=0 2024-11-22T03:49:59,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741851_1027 (size=7821) 2024-11-22T03:49:59,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741851_1027 (size=7821) 2024-11-22T03:49:59,651 INFO [M:0;c85114ed5096:34055 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.97 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/10e7ee7a3cf149be958c66799f1c8596 2024-11-22T03:49:59,655 INFO [M:0;c85114ed5096:34055 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 10e7ee7a3cf149be958c66799f1c8596 2024-11-22T03:49:59,671 DEBUG [M:0;c85114ed5096:34055 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ac514c0560fb4f48b43912e99a27f4b1 is 69, key is c85114ed5096,42443,1732247347994/rs:state/1732247348231/Put/seqid=0 2024-11-22T03:49:59,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741852_1028 (size=5156) 2024-11-22T03:49:59,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741852_1028 (size=5156) 2024-11-22T03:49:59,676 INFO [M:0;c85114ed5096:34055 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ac514c0560fb4f48b43912e99a27f4b1 2024-11-22T03:49:59,695 DEBUG [M:0;c85114ed5096:34055 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/23faea359bfa48fc8a61bbbedd76a761 is 52, key is load_balancer_on/state:d/1732247348931/Put/seqid=0 2024-11-22T03:49:59,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741853_1029 (size=5056) 2024-11-22T03:49:59,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741853_1029 (size=5056) 2024-11-22T03:49:59,699 INFO [M:0;c85114ed5096:34055 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/23faea359bfa48fc8a61bbbedd76a761 2024-11-22T03:49:59,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:49:59,700 INFO [RS:0;c85114ed5096:42443 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:49:59,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42443-0x100658c3f500001, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:49:59,700 INFO [RS:0;c85114ed5096:42443 {}] regionserver.HRegionServer(1031): Exiting; stopping=c85114ed5096,42443,1732247347994; zookeeper connection closed. 
2024-11-22T03:49:59,700 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2ed1f4bc {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2ed1f4bc 2024-11-22T03:49:59,700 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T03:49:59,705 DEBUG [M:0;c85114ed5096:34055 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f496223b1e2b4f459a4d3a30b448fe0d as hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f496223b1e2b4f459a4d3a30b448fe0d 2024-11-22T03:49:59,710 INFO [M:0;c85114ed5096:34055 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f496223b1e2b4f459a4d3a30b448fe0d, entries=8, sequenceid=121, filesize=5.5 K 2024-11-22T03:49:59,711 DEBUG [M:0;c85114ed5096:34055 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/10e7ee7a3cf149be958c66799f1c8596 as hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/10e7ee7a3cf149be958c66799f1c8596 2024-11-22T03:49:59,718 INFO [M:0;c85114ed5096:34055 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 10e7ee7a3cf149be958c66799f1c8596 2024-11-22T03:49:59,718 INFO [M:0;c85114ed5096:34055 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/10e7ee7a3cf149be958c66799f1c8596, entries=14, sequenceid=121, filesize=7.6 K 2024-11-22T03:49:59,721 DEBUG [M:0;c85114ed5096:34055 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ac514c0560fb4f48b43912e99a27f4b1 as hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ac514c0560fb4f48b43912e99a27f4b1 2024-11-22T03:49:59,728 INFO [M:0;c85114ed5096:34055 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ac514c0560fb4f48b43912e99a27f4b1, entries=1, sequenceid=121, filesize=5.0 K 2024-11-22T03:49:59,729 DEBUG [M:0;c85114ed5096:34055 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/23faea359bfa48fc8a61bbbedd76a761 as hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/23faea359bfa48fc8a61bbbedd76a761 2024-11-22T03:49:59,736 INFO [M:0;c85114ed5096:34055 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42755/user/jenkins/test-data/20035f82-5ca8-114e-74e0-4f62259ad202/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/23faea359bfa48fc8a61bbbedd76a761, entries=1, sequenceid=121, filesize=4.9 K 2024-11-22T03:49:59,738 INFO [M:0;c85114ed5096:34055 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.57 KB/44620, heapSize ~54.92 KB/56240, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=121, compaction requested=false 2024-11-22T03:49:59,741 INFO [M:0;c85114ed5096:34055 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:49:59,741 DEBUG [M:0;c85114ed5096:34055 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732247399609Disabling compacts and flushes for region at 1732247399609Disabling writes for close at 1732247399609Obtaining lock to block concurrent updates at 1732247399609Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732247399609Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44620, getHeapSize=56240, getOffHeapSize=0, getCellsCount=140 at 1732247399609Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732247399610 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732247399610Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732247399622 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732247399622Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732247399631 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732247399646 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732247399646Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732247399655 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732247399670 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732247399670Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732247399680 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732247399694 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732247399694Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@200bc44f: reopening flushed file at 1732247399704 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62701589: reopening flushed file at 1732247399710 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fd5dd76: reopening flushed file at 1732247399719 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32175aac: reopening flushed file at 1732247399728 (+9 ms)Finished flush of dataSize ~43.57 KB/44620, heapSize ~54.92 KB/56240, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=121, compaction requested=false at 1732247399738 (+10 ms)Writing region close event to WAL at 1732247399741 (+3 ms)Closed at 1732247399741 2024-11-22T03:49:59,741 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:59,741 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:59,742 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:59,742 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:59,742 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:49:59,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35279 is added to blk_1073741830_1006 (size=53017) 2024-11-22T03:49:59,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37243 is added to blk_1073741830_1006 (size=53017) 2024-11-22T03:49:59,747 INFO [M:0;c85114ed5096:34055 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T03:49:59,747 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T03:49:59,748 INFO [M:0;c85114ed5096:34055 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34055 2024-11-22T03:49:59,748 INFO [M:0;c85114ed5096:34055 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:49:59,849 INFO [M:0;c85114ed5096:34055 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:49:59,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:49:59,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34055-0x100658c3f500000, quorum=127.0.0.1:62808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:49:59,853 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c1d88de{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:49:59,853 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58d2b6e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:49:59,853 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:49:59,853 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fb6de9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:49:59,854 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b50defd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/hadoop.log.dir/,STOPPED} 2024-11-22T03:49:59,857 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:49:59,857 WARN [BP-1594657804-172.17.0.2-1732247347376 heartbeating to localhost/127.0.0.1:42755 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:49:59,857 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:49:59,857 WARN [BP-1594657804-172.17.0.2-1732247347376 heartbeating to localhost/127.0.0.1:42755 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1594657804-172.17.0.2-1732247347376 (Datanode Uuid e1791a5b-d903-43c6-a771-334c0fcb5c05) service to localhost/127.0.0.1:42755 2024-11-22T03:49:59,858 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/cluster_2a059e11-2e62-2404-f3f0-30e680e8de61/data/data3/current/BP-1594657804-172.17.0.2-1732247347376 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:49:59,858 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/cluster_2a059e11-2e62-2404-f3f0-30e680e8de61/data/data4/current/BP-1594657804-172.17.0.2-1732247347376 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:49:59,859 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:49:59,864 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e4472fe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:49:59,865 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@675e37f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:49:59,865 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:49:59,865 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b768886{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:49:59,865 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7114135a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/hadoop.log.dir/,STOPPED} 2024-11-22T03:49:59,866 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:49:59,866 WARN [BP-1594657804-172.17.0.2-1732247347376 heartbeating to localhost/127.0.0.1:42755 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:49:59,866 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:49:59,866 WARN [BP-1594657804-172.17.0.2-1732247347376 heartbeating to localhost/127.0.0.1:42755 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1594657804-172.17.0.2-1732247347376 (Datanode Uuid d8fe8db0-6443-4773-af71-5f5054a486e4) service to localhost/127.0.0.1:42755 2024-11-22T03:49:59,867 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/cluster_2a059e11-2e62-2404-f3f0-30e680e8de61/data/data1/current/BP-1594657804-172.17.0.2-1732247347376 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:49:59,867 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/cluster_2a059e11-2e62-2404-f3f0-30e680e8de61/data/data2/current/BP-1594657804-172.17.0.2-1732247347376 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:49:59,867 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:49:59,873 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f9b07ee{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:49:59,873 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70854f76{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:49:59,873 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:49:59,873 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45dd2cc3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:49:59,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58d3fd6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/hadoop.log.dir/,STOPPED} 2024-11-22T03:49:59,879 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T03:49:59,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:59,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:49:59,896 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T03:49:59,903 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42755 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42755 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:42755 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42755 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:42755 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/c85114ed5096:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:42755 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42755 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:42755 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=485 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=31 (was 73), ProcessCount=11 (was 11), AvailableMemoryMB=2576 (was 2702) 2024-11-22T03:49:59,910 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=485, MaxFileDescriptor=1048576, SystemLoadAverage=31, ProcessCount=11, AvailableMemoryMB=2576 2024-11-22T03:49:59,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T03:49:59,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/hadoop.log.dir so I do NOT create it in target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635 2024-11-22T03:49:59,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/95438ced-2f5d-8287-f85a-b1dabd4c197c/hadoop.tmp.dir so I do NOT create it in target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635 2024-11-22T03:49:59,910 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/cluster_7ba9fba6-8b91-7904-6f67-0deb24c1911c, deleteOnExit=true 2024-11-22T03:49:59,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T03:49:59,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/test.cache.data in system properties and HBase conf 2024-11-22T03:49:59,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T03:49:59,911 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/hadoop.log.dir in system properties and HBase conf 2024-11-22T03:49:59,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T03:49:59,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T03:49:59,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T03:49:59,911 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-22T03:49:59,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:49:59,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:49:59,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T03:49:59,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:49:59,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T03:49:59,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T03:49:59,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:49:59,912 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:49:59,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T03:49:59,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/nfs.dump.dir in system properties and HBase conf 2024-11-22T03:49:59,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/java.io.tmpdir in system properties and HBase conf 2024-11-22T03:49:59,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:49:59,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T03:49:59,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T03:49:59,925 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:49:59,964 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:49:59,968 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:49:59,969 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:49:59,969 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:49:59,970 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:49:59,970 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:49:59,970 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b4e4996{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:49:59,971 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75b38c73{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:50:00,063 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@48f98d26{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/java.io.tmpdir/jetty-localhost-40203-hadoop-hdfs-3_4_1-tests_jar-_-any-639220363911078377/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:50:00,064 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@61c0fae3{HTTP/1.1, (http/1.1)}{localhost:40203} 2024-11-22T03:50:00,064 INFO [Time-limited test {}] server.Server(415): Started @237390ms 2024-11-22T03:50:00,074 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:50:00,128 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:50:00,131 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:50:00,132 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:50:00,132 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:50:00,132 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:50:00,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@107c9d2b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:50:00,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77b55185{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:50:00,228 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d953028{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/java.io.tmpdir/jetty-localhost-41037-hadoop-hdfs-3_4_1-tests_jar-_-any-2472495645816661080/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:50:00,228 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@424c4a45{HTTP/1.1, (http/1.1)}{localhost:41037} 2024-11-22T03:50:00,228 INFO [Time-limited test {}] server.Server(415): Started @237555ms 2024-11-22T03:50:00,229 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:50:00,249 INFO [regionserver/c85114ed5096:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:50:00,254 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:50:00,257 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:50:00,257 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:50:00,258 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:50:00,258 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T03:50:00,258 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7326ff1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:50:00,258 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@165b13fe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:50:00,284 WARN [Thread-1955 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/cluster_7ba9fba6-8b91-7904-6f67-0deb24c1911c/data/data2/current/BP-1107785697-172.17.0.2-1732247399929/current, will proceed with Du for space computation calculation, 2024-11-22T03:50:00,284 WARN [Thread-1954 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/cluster_7ba9fba6-8b91-7904-6f67-0deb24c1911c/data/data1/current/BP-1107785697-172.17.0.2-1732247399929/current, will proceed with Du for space computation calculation, 2024-11-22T03:50:00,303 WARN [Thread-1933 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:50:00,305 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b2bb919836d3a64 with lease ID 0xbe05745543a3071d: Processing first storage report for DS-9e7e3cec-4349-4b72-8545-0e5746315e44 from datanode DatanodeRegistration(127.0.0.1:41503, datanodeUuid=864e3c50-7fea-4e1d-81f8-9b165c94078c, infoPort=45473, infoSecurePort=0, ipcPort=38739, storageInfo=lv=-57;cid=testClusterID;nsid=1027644636;c=1732247399929) 2024-11-22T03:50:00,305 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b2bb919836d3a64 with lease ID 0xbe05745543a3071d: from storage DS-9e7e3cec-4349-4b72-8545-0e5746315e44 node DatanodeRegistration(127.0.0.1:41503, datanodeUuid=864e3c50-7fea-4e1d-81f8-9b165c94078c, infoPort=45473, infoSecurePort=0, ipcPort=38739, storageInfo=lv=-57;cid=testClusterID;nsid=1027644636;c=1732247399929), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:50:00,305 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b2bb919836d3a64 with lease ID 0xbe05745543a3071d: Processing first storage report for DS-2524901e-250d-4409-9276-3b6e3090bcc8 from datanode DatanodeRegistration(127.0.0.1:41503, datanodeUuid=864e3c50-7fea-4e1d-81f8-9b165c94078c, infoPort=45473, infoSecurePort=0, ipcPort=38739, storageInfo=lv=-57;cid=testClusterID;nsid=1027644636;c=1732247399929) 2024-11-22T03:50:00,305 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b2bb919836d3a64 with lease ID 0xbe05745543a3071d: from storage DS-2524901e-250d-4409-9276-3b6e3090bcc8 node DatanodeRegistration(127.0.0.1:41503, datanodeUuid=864e3c50-7fea-4e1d-81f8-9b165c94078c, infoPort=45473, infoSecurePort=0, ipcPort=38739, storageInfo=lv=-57;cid=testClusterID;nsid=1027644636;c=1732247399929), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:50:00,354 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5f41f884{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/java.io.tmpdir/jetty-localhost-46805-hadoop-hdfs-3_4_1-tests_jar-_-any-15441101269927094655/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:50:00,355 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d701b2{HTTP/1.1, (http/1.1)}{localhost:46805} 2024-11-22T03:50:00,355 INFO [Time-limited test {}] server.Server(415): Started @237681ms 2024-11-22T03:50:00,356 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-22T03:50:00,407 WARN [Thread-1980 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/cluster_7ba9fba6-8b91-7904-6f67-0deb24c1911c/data/data3/current/BP-1107785697-172.17.0.2-1732247399929/current, will proceed with Du for space computation calculation, 2024-11-22T03:50:00,407 WARN [Thread-1981 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/cluster_7ba9fba6-8b91-7904-6f67-0deb24c1911c/data/data4/current/BP-1107785697-172.17.0.2-1732247399929/current, will proceed with Du for space computation calculation, 2024-11-22T03:50:00,423 WARN [Thread-1969 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:50:00,424 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xac14a0e03eb283d0 with lease ID 0xbe05745543a3071e: Processing first storage report for DS-61adafbe-c78c-4f45-bf64-2fc58af26b82 from datanode DatanodeRegistration(127.0.0.1:43633, datanodeUuid=2851d919-f3e6-42f8-9a11-186e42430dbe, infoPort=36665, infoSecurePort=0, ipcPort=41679, storageInfo=lv=-57;cid=testClusterID;nsid=1027644636;c=1732247399929) 2024-11-22T03:50:00,424 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xac14a0e03eb283d0 with lease ID 0xbe05745543a3071e: from storage DS-61adafbe-c78c-4f45-bf64-2fc58af26b82 node DatanodeRegistration(127.0.0.1:43633, datanodeUuid=2851d919-f3e6-42f8-9a11-186e42430dbe, infoPort=36665, infoSecurePort=0, ipcPort=41679, storageInfo=lv=-57;cid=testClusterID;nsid=1027644636;c=1732247399929), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:50:00,425 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xac14a0e03eb283d0 with lease ID 0xbe05745543a3071e: Processing first storage report for DS-a2757959-bb7d-461f-a08f-92f29809231b from datanode DatanodeRegistration(127.0.0.1:43633, datanodeUuid=2851d919-f3e6-42f8-9a11-186e42430dbe, infoPort=36665, infoSecurePort=0, ipcPort=41679, storageInfo=lv=-57;cid=testClusterID;nsid=1027644636;c=1732247399929) 2024-11-22T03:50:00,425 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xac14a0e03eb283d0 with lease ID 0xbe05745543a3071e: from storage DS-a2757959-bb7d-461f-a08f-92f29809231b node DatanodeRegistration(127.0.0.1:43633, datanodeUuid=2851d919-f3e6-42f8-9a11-186e42430dbe, infoPort=36665, infoSecurePort=0, ipcPort=41679, storageInfo=lv=-57;cid=testClusterID;nsid=1027644636;c=1732247399929), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:50:00,476 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635 2024-11-22T03:50:00,482 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/cluster_7ba9fba6-8b91-7904-6f67-0deb24c1911c/zookeeper_0, clientPort=49967, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/cluster_7ba9fba6-8b91-7904-6f67-0deb24c1911c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/cluster_7ba9fba6-8b91-7904-6f67-0deb24c1911c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T03:50:00,483 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49967 2024-11-22T03:50:00,484 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:00,486 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:00,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:50:00,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:50:00,496 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a with version=8 2024-11-22T03:50:00,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/hbase-staging 2024-11-22T03:50:00,498 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c85114ed5096:0 server-side Connection retries=45 2024-11-22T03:50:00,498 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:50:00,498 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:50:00,498 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:50:00,498 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:50:00,498 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:50:00,498 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T03:50:00,498 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:50:00,499 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38717 2024-11-22T03:50:00,501 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38717 connecting to ZooKeeper ensemble=127.0.0.1:49967 2024-11-22T03:50:00,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:387170x0, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:50:00,504 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38717-0x100658d0c8f0000 connected 2024-11-22T03:50:00,516 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:00,517 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:00,521 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:50:00,521 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a, hbase.cluster.distributed=false 2024-11-22T03:50:00,522 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:50:00,523 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38717 2024-11-22T03:50:00,523 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38717 2024-11-22T03:50:00,523 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38717 2024-11-22T03:50:00,524 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38717 2024-11-22T03:50:00,524 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38717 2024-11-22T03:50:00,540 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c85114ed5096:0 server-side Connection retries=45 2024-11-22T03:50:00,540 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:50:00,540 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:50:00,540 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:50:00,540 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:50:00,540 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:50:00,540 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:50:00,540 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:50:00,540 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34883 2024-11-22T03:50:00,542 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34883 connecting to ZooKeeper ensemble=127.0.0.1:49967 2024-11-22T03:50:00,542 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:00,544 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:00,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:348830x0, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:50:00,547 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34883-0x100658d0c8f0001 connected 2024-11-22T03:50:00,547 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:50:00,547 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:50:00,547 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T03:50:00,548 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T03:50:00,549 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:50:00,549 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34883 2024-11-22T03:50:00,549 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34883 2024-11-22T03:50:00,549 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34883 2024-11-22T03:50:00,550 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34883 2024-11-22T03:50:00,550 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34883 
2024-11-22T03:50:00,560 DEBUG [M:0;c85114ed5096:38717 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c85114ed5096:38717 2024-11-22T03:50:00,560 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c85114ed5096,38717,1732247400498 2024-11-22T03:50:00,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:50:00,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:50:00,562 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c85114ed5096,38717,1732247400498 2024-11-22T03:50:00,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T03:50:00,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:00,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:00,563 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:50:00,563 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c85114ed5096,38717,1732247400498 from backup master directory 2024-11-22T03:50:00,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c85114ed5096,38717,1732247400498 2024-11-22T03:50:00,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:50:00,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:50:00,564 WARN [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-22T03:50:00,564 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c85114ed5096,38717,1732247400498 2024-11-22T03:50:00,568 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/hbase.id] with ID: 79c5ff6b-94e3-430c-ac5b-9d480b516378 2024-11-22T03:50:00,568 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/.tmp/hbase.id 2024-11-22T03:50:00,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:50:00,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:50:00,574 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/.tmp/hbase.id]:[hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/hbase.id] 2024-11-22T03:50:00,587 INFO [master/c85114ed5096:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:00,587 INFO [master/c85114ed5096:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T03:50:00,588 INFO [master/c85114ed5096:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-22T03:50:00,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:00,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:00,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:50:00,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:50:00,596 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:50:00,597 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T03:50:00,597 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:50:00,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:50:00,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:50:00,604 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store 2024-11-22T03:50:00,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:50:00,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:50:00,610 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:50:00,610 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:50:00,610 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:50:00,610 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:50:00,610 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:50:00,610 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:50:00,610 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:50:00,610 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732247400610Disabling compacts and flushes for region at 1732247400610Disabling writes for close at 1732247400610Writing region close event to WAL at 1732247400610Closed at 1732247400610 2024-11-22T03:50:00,611 WARN [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/.initializing 2024-11-22T03:50:00,611 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/WALs/c85114ed5096,38717,1732247400498 2024-11-22T03:50:00,613 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C38717%2C1732247400498, suffix=, logDir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/WALs/c85114ed5096,38717,1732247400498, archiveDir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/oldWALs, maxLogs=10 2024-11-22T03:50:00,613 INFO [master/c85114ed5096:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C38717%2C1732247400498.1732247400613 2024-11-22T03:50:00,621 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/WALs/c85114ed5096,38717,1732247400498/c85114ed5096%2C38717%2C1732247400498.1732247400613 2024-11-22T03:50:00,622 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45473:45473),(127.0.0.1/127.0.0.1:36665:36665)] 2024-11-22T03:50:00,625 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:50:00,625 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:50:00,625 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:00,626 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:00,629 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:00,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T03:50:00,630 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:00,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:00,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:00,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T03:50:00,631 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:00,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:50:00,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:00,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T03:50:00,633 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:00,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:50:00,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:00,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T03:50:00,634 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:00,635 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:50:00,635 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:00,635 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:00,636 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:00,637 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:00,637 DEBUG [master/c85114ed5096:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:00,637 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T03:50:00,638 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:00,641 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:50:00,642 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783225, jitterRate=-0.004078537225723267}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T03:50:00,642 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732247400626Initializing all the Stores at 1732247400627 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247400627Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247400628 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247400628Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247400628Cleaning up temporary data from old regions at 1732247400637 (+9 ms)Region opened successfully at 1732247400642 (+5 ms) 2024-11-22T03:50:00,642 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T03:50:00,645 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@604c0901, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c85114ed5096/172.17.0.2:0 2024-11-22T03:50:00,646 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T03:50:00,646 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T03:50:00,646 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T03:50:00,646 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T03:50:00,647 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T03:50:00,647 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T03:50:00,647 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T03:50:00,649 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T03:50:00,650 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T03:50:00,650 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T03:50:00,651 INFO [master/c85114ed5096:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T03:50:00,651 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T03:50:00,652 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T03:50:00,652 INFO [master/c85114ed5096:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T03:50:00,653 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T03:50:00,654 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T03:50:00,654 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T03:50:00,655 DEBUG 
[master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T03:50:00,656 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T03:50:00,657 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T03:50:00,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:50:00,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:50:00,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:00,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:00,658 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c85114ed5096,38717,1732247400498, sessionid=0x100658d0c8f0000, setting cluster-up flag (Was=false) 2024-11-22T03:50:00,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:00,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:00,662 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T03:50:00,663 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c85114ed5096,38717,1732247400498 2024-11-22T03:50:00,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:00,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:00,667 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T03:50:00,668 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c85114ed5096,38717,1732247400498 2024-11-22T03:50:00,669 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T03:50:00,671 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T03:50:00,671 INFO [master/c85114ed5096:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T03:50:00,671 INFO [master/c85114ed5096:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T03:50:00,671 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c85114ed5096,38717,1732247400498 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T03:50:00,672 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:50:00,673 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:50:00,673 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:50:00,673 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:50:00,673 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c85114ed5096:0, corePoolSize=10, maxPoolSize=10 2024-11-22T03:50:00,673 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:00,673 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c85114ed5096:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:50:00,673 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c85114ed5096:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T03:50:00,677 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732247430676 2024-11-22T03:50:00,677 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:50:00,677 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T03:50:00,677 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T03:50:00,677 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T03:50:00,677 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T03:50:00,677 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T03:50:00,677 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T03:50:00,677 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T03:50:00,677 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-22T03:50:00,678 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:00,678 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T03:50:00,681 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T03:50:00,681 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T03:50:00,681 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T03:50:00,681 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T03:50:00,681 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T03:50:00,682 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247400681,5,FailOnTimeoutGroup] 2024-11-22T03:50:00,682 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247400682,5,FailOnTimeoutGroup] 2024-11-22T03:50:00,682 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:00,682 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T03:50:00,682 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:00,682 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:00,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:50:00,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:50:00,689 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T03:50:00,689 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a 2024-11-22T03:50:00,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:50:00,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:50:00,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:50:00,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:50:00,697 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:50:00,698 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:00,698 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:00,698 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:50:00,699 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:50:00,699 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:00,700 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:00,700 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:50:00,701 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:50:00,701 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:00,701 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:00,701 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:50:00,702 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:50:00,702 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:00,702 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:00,702 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:50:00,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740 2024-11-22T03:50:00,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740 2024-11-22T03:50:00,705 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:50:00,705 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:50:00,705 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:50:00,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:50:00,708 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:50:00,709 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=731215, jitterRate=-0.07021284103393555}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:50:00,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732247400695Initializing all the Stores at 1732247400696 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247400696Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247400696Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247400696Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247400696Cleaning up temporary data from old regions at 1732247400705 (+9 ms)Region opened successfully at 1732247400709 (+4 ms) 2024-11-22T03:50:00,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:50:00,709 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:50:00,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:50:00,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:50:00,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:50:00,710 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:50:00,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732247400709Disabling compacts and flushes for region at 
1732247400709Disabling writes for close at 1732247400709Writing region close event to WAL at 1732247400710 (+1 ms)Closed at 1732247400710 2024-11-22T03:50:00,711 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:50:00,711 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T03:50:00,711 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T03:50:00,713 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:50:00,714 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T03:50:00,752 INFO [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(746): ClusterId : 79c5ff6b-94e3-430c-ac5b-9d480b516378 2024-11-22T03:50:00,752 DEBUG [RS:0;c85114ed5096:34883 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:50:00,754 DEBUG [RS:0;c85114ed5096:34883 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:50:00,754 DEBUG [RS:0;c85114ed5096:34883 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:50:00,755 DEBUG [RS:0;c85114ed5096:34883 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:50:00,755 DEBUG [RS:0;c85114ed5096:34883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@617b3435, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c85114ed5096/172.17.0.2:0 2024-11-22T03:50:00,766 DEBUG [RS:0;c85114ed5096:34883 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c85114ed5096:34883 2024-11-22T03:50:00,766 INFO [RS:0;c85114ed5096:34883 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:50:00,766 INFO [RS:0;c85114ed5096:34883 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:50:00,766 DEBUG [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T03:50:00,767 INFO [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(2659): reportForDuty to master=c85114ed5096,38717,1732247400498 with port=34883, startcode=1732247400539 2024-11-22T03:50:00,767 DEBUG [RS:0;c85114ed5096:34883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:50:00,769 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54577, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:50:00,769 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38717 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c85114ed5096,34883,1732247400539 2024-11-22T03:50:00,769 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38717 {}] master.ServerManager(517): Registering regionserver=c85114ed5096,34883,1732247400539 2024-11-22T03:50:00,771 DEBUG [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a 2024-11-22T03:50:00,771 DEBUG [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41565 2024-11-22T03:50:00,771 DEBUG [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:50:00,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:50:00,773 DEBUG [RS:0;c85114ed5096:34883 {}] zookeeper.ZKUtil(111): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c85114ed5096,34883,1732247400539 2024-11-22T03:50:00,773 WARN [RS:0;c85114ed5096:34883 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:50:00,773 INFO [RS:0;c85114ed5096:34883 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:50:00,773 DEBUG [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/WALs/c85114ed5096,34883,1732247400539 2024-11-22T03:50:00,773 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c85114ed5096,34883,1732247400539] 2024-11-22T03:50:00,776 INFO [RS:0;c85114ed5096:34883 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:50:00,781 INFO [RS:0;c85114ed5096:34883 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:50:00,781 INFO [RS:0;c85114ed5096:34883 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:50:00,781 INFO [RS:0;c85114ed5096:34883 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T03:50:00,782 INFO [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:50:00,782 INFO [RS:0;c85114ed5096:34883 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:50:00,782 INFO [RS:0;c85114ed5096:34883 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:00,783 DEBUG [RS:0;c85114ed5096:34883 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:00,783 DEBUG [RS:0;c85114ed5096:34883 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:00,783 DEBUG [RS:0;c85114ed5096:34883 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:00,783 DEBUG [RS:0;c85114ed5096:34883 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:00,783 DEBUG [RS:0;c85114ed5096:34883 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:00,783 DEBUG [RS:0;c85114ed5096:34883 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c85114ed5096:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:50:00,783 DEBUG [RS:0;c85114ed5096:34883 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:00,783 DEBUG [RS:0;c85114ed5096:34883 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:00,783 DEBUG [RS:0;c85114ed5096:34883 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:00,783 DEBUG [RS:0;c85114ed5096:34883 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:00,783 DEBUG [RS:0;c85114ed5096:34883 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:00,783 DEBUG [RS:0;c85114ed5096:34883 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:00,783 DEBUG [RS:0;c85114ed5096:34883 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:50:00,783 DEBUG [RS:0;c85114ed5096:34883 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:50:00,784 INFO [RS:0;c85114ed5096:34883 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-22T03:50:00,784 INFO [RS:0;c85114ed5096:34883 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:00,784 INFO [RS:0;c85114ed5096:34883 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:00,784 INFO [RS:0;c85114ed5096:34883 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:00,784 INFO [RS:0;c85114ed5096:34883 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:00,784 INFO [RS:0;c85114ed5096:34883 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,34883,1732247400539-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:50:00,798 INFO [RS:0;c85114ed5096:34883 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:50:00,798 INFO [RS:0;c85114ed5096:34883 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,34883,1732247400539-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:00,798 INFO [RS:0;c85114ed5096:34883 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:00,798 INFO [RS:0;c85114ed5096:34883 {}] regionserver.Replication(171): c85114ed5096,34883,1732247400539 started 2024-11-22T03:50:00,810 INFO [RS:0;c85114ed5096:34883 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:00,810 INFO [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(1482): Serving as c85114ed5096,34883,1732247400539, RpcServer on c85114ed5096/172.17.0.2:34883, sessionid=0x100658d0c8f0001 2024-11-22T03:50:00,811 DEBUG [RS:0;c85114ed5096:34883 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:50:00,811 DEBUG [RS:0;c85114ed5096:34883 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c85114ed5096,34883,1732247400539 2024-11-22T03:50:00,811 DEBUG [RS:0;c85114ed5096:34883 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,34883,1732247400539' 2024-11-22T03:50:00,811 DEBUG [RS:0;c85114ed5096:34883 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:50:00,811 DEBUG [RS:0;c85114ed5096:34883 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:50:00,812 DEBUG [RS:0;c85114ed5096:34883 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:50:00,812 DEBUG [RS:0;c85114ed5096:34883 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:50:00,812 DEBUG [RS:0;c85114ed5096:34883 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c85114ed5096,34883,1732247400539 2024-11-22T03:50:00,812 DEBUG [RS:0;c85114ed5096:34883 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,34883,1732247400539' 2024-11-22T03:50:00,812 DEBUG [RS:0;c85114ed5096:34883 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:50:00,812 DEBUG 
[RS:0;c85114ed5096:34883 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:50:00,812 DEBUG [RS:0;c85114ed5096:34883 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:50:00,812 INFO [RS:0;c85114ed5096:34883 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:50:00,812 INFO [RS:0;c85114ed5096:34883 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T03:50:00,865 WARN [c85114ed5096:38717 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T03:50:00,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:00,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:00,916 INFO [RS:0;c85114ed5096:34883 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C34883%2C1732247400539, suffix=, logDir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/WALs/c85114ed5096,34883,1732247400539, archiveDir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/oldWALs, maxLogs=32 2024-11-22T03:50:00,918 INFO [RS:0;c85114ed5096:34883 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C34883%2C1732247400539.1732247400917 2024-11-22T03:50:00,928 INFO [RS:0;c85114ed5096:34883 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/WALs/c85114ed5096,34883,1732247400539/c85114ed5096%2C34883%2C1732247400539.1732247400917 2024-11-22T03:50:00,930 DEBUG [RS:0;c85114ed5096:34883 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36665:36665),(127.0.0.1/127.0.0.1:45473:45473)] 2024-11-22T03:50:01,115 DEBUG [c85114ed5096:38717 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T03:50:01,116 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c85114ed5096,34883,1732247400539 2024-11-22T03:50:01,118 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c85114ed5096,34883,1732247400539, state=OPENING 2024-11-22T03:50:01,120 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T03:50:01,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:01,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:01,123 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:50:01,123 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c85114ed5096,34883,1732247400539}] 2024-11-22T03:50:01,123 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:50:01,123 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:50:01,278 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T03:50:01,280 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48255, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T03:50:01,285 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T03:50:01,285 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:50:01,288 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C34883%2C1732247400539.meta, suffix=.meta, logDir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/WALs/c85114ed5096,34883,1732247400539, archiveDir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/oldWALs, maxLogs=32 2024-11-22T03:50:01,289 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C34883%2C1732247400539.meta.1732247401288.meta 2024-11-22T03:50:01,296 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/WALs/c85114ed5096,34883,1732247400539/c85114ed5096%2C34883%2C1732247400539.meta.1732247401288.meta 2024-11-22T03:50:01,298 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45473:45473),(127.0.0.1/127.0.0.1:36665:36665)] 2024-11-22T03:50:01,298 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 
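The entries above show the master publishing the hbase:meta location under /hbase/meta-region-server and dispatching OpenRegionProcedure pid=3 to c85114ed5096,34883. For reference, a minimal client-side sketch, assuming a reachable cluster configured through hbase-site.xml (none of this run's hosts or ports are hard-coded), that resolves the same meta location through the public RegionLocator API instead of reading ZooKeeper directly:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Resolves hbase:meta,,1.1588230740 to its current region server,
          // i.e. the location the master just wrote for the meta znode.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getRegionNameAsString()
                + " -> " + loc.getServerName());
          }
        }
      }
    }

The lookup goes through the client connection registry, so it should reflect whatever the master most recently published for the meta replica.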
2024-11-22T03:50:01,299 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T03:50:01,299 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T03:50:01,299 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-22T03:50:01,299 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T03:50:01,299 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:50:01,299 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T03:50:01,299 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T03:50:01,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:50:01,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:50:01,301 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:01,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:01,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:50:01,302 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:50:01,302 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:01,303 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:01,303 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:50:01,303 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:50:01,303 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:01,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:01,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:50:01,304 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:50:01,305 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:01,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:01,305 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:50:01,306 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740 2024-11-22T03:50:01,307 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740 2024-11-22T03:50:01,308 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:50:01,308 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:50:01,308 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
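The FlushLargeStoresPolicy message notes that hbase:meta carries no hbase.hregion.percolumnfamilyflush.size.lower.bound in its descriptor, so the region falls back to memstore flush size divided by the number of families (16.0 M here). A sketch of how that bound could be supplied, either cluster-wide or per table; the table name and the 8 MB value are illustrative assumptions, not values from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushBoundExample {
      public static void main(String[] args) {
        // Cluster-wide default (hypothetical 8 MB lower bound).
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 8L * 1024 * 1024);

        // Per-table override stored in the table descriptor, which appears to be
        // the key the policy reports as missing in the entry above before it
        // falls back to memstoreFlushSize / numberOfFamilies.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("ExampleTable"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(8L * 1024 * 1024))
            .build();
        System.out.println(td);
      }
    }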
2024-11-22T03:50:01,309 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:50:01,310 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=700423, jitterRate=-0.10936662554740906}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:50:01,310 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T03:50:01,311 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732247401299Writing region info on filesystem at 1732247401299Initializing all the Stores at 1732247401300 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247401300Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247401300Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247401300Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247401300Cleaning up temporary data from old regions at 1732247401308 (+8 ms)Running coprocessor post-open hooks at 1732247401310 (+2 ms)Region opened successfully at 1732247401311 (+1 ms) 2024-11-22T03:50:01,312 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732247401278 2024-11-22T03:50:01,314 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T03:50:01,314 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T03:50:01,315 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=c85114ed5096,34883,1732247400539 2024-11-22T03:50:01,315 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c85114ed5096,34883,1732247400539, state=OPEN 2024-11-22T03:50:01,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:50:01,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:50:01,317 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c85114ed5096,34883,1732247400539 2024-11-22T03:50:01,317 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:50:01,317 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:50:01,320 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T03:50:01,320 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c85114ed5096,34883,1732247400539 in 194 msec 2024-11-22T03:50:01,322 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T03:50:01,322 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 609 msec 2024-11-22T03:50:01,323 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:50:01,323 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T03:50:01,324 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:50:01,324 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c85114ed5096,34883,1732247400539, seqNum=-1] 2024-11-22T03:50:01,324 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:50:01,325 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43999, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:50:01,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 660 msec 2024-11-22T03:50:01,331 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732247401331, completionTime=-1 2024-11-22T03:50:01,331 INFO 
[master/c85114ed5096:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T03:50:01,331 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T03:50:01,333 INFO [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T03:50:01,333 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732247461333 2024-11-22T03:50:01,333 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732247521333 2024-11-22T03:50:01,333 INFO [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-22T03:50:01,333 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,38717,1732247400498-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:01,333 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,38717,1732247400498-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:01,333 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,38717,1732247400498-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:01,333 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c85114ed5096:38717, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:01,334 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:01,334 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:01,336 DEBUG [master/c85114ed5096:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T03:50:01,337 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.773sec 2024-11-22T03:50:01,337 INFO [master/c85114ed5096:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T03:50:01,337 INFO [master/c85114ed5096:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T03:50:01,337 INFO [master/c85114ed5096:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T03:50:01,337 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
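With master initialization complete (0.773 sec) and one region server checked in, the mini cluster is serviceable. A rough client-side equivalent of the "Finished waiting on RegionServer count=1" check, assuming the same client configuration as the earlier sketch, via Admin.getClusterMetrics():

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusCheck {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          // Active master plus live region servers, matching the
          // ServerManager "expected min=1 server(s)" entry above.
          System.out.println("master: " + metrics.getMasterName());
          for (ServerName rs : metrics.getLiveServerMetrics().keySet()) {
            System.out.println("region server: " + rs);
          }
        }
      }
    }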
2024-11-22T03:50:01,337 INFO [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T03:50:01,337 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,38717,1732247400498-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:50:01,338 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,38717,1732247400498-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T03:50:01,340 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T03:50:01,340 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T03:50:01,340 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,38717,1732247400498-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:01,352 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ae48b16, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:50:01,352 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c85114ed5096,38717,-1 for getting cluster id 2024-11-22T03:50:01,352 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T03:50:01,354 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '79c5ff6b-94e3-430c-ac5b-9d480b516378' 2024-11-22T03:50:01,354 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T03:50:01,354 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "79c5ff6b-94e3-430c-ac5b-9d480b516378" 2024-11-22T03:50:01,354 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77af3a48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:50:01,354 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c85114ed5096,38717,-1] 2024-11-22T03:50:01,354 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T03:50:01,355 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:50:01,356 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51332, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T03:50:01,357 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5169cded, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:50:01,357 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:50:01,358 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c85114ed5096,34883,1732247400539, seqNum=-1] 2024-11-22T03:50:01,358 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:50:01,359 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36400, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:50:01,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c85114ed5096,38717,1732247400498 2024-11-22T03:50:01,361 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:01,364 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T03:50:01,364 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T03:50:01,366 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is c85114ed5096,38717,1732247400498 2024-11-22T03:50:01,366 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@45c60306 2024-11-22T03:50:01,366 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T03:50:01,367 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51334, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T03:50:01,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38717 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T03:50:01,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38717 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
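The two TableDescriptorChecker warnings are expected in this test: the descriptor deliberately carries a tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) so that flushes, WAL rolls and splits happen quickly. A sketch of a descriptor built with those same values (how the test tolerates or suppresses the sanity check is not visible in this log, so only the descriptor itself is illustrated):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SmallRegionDescriptor {
      public static void main(String[] args) {
        // Mirrors the sizes flagged by TableDescriptorChecker above:
        // MAX_FILESIZE = 786432 bytes, MEMSTORE_FLUSHSIZE = 8192 bytes.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)
                .build())
            .setMaxFileSize(786432L)      // "too small ... over splitting" warning
            .setMemStoreFlushSize(8192L)  // "very frequent flushing" warning
            .build();
        System.out.println(td);
      }
    }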
2024-11-22T03:50:01,368 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38717 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:50:01,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38717 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-22T03:50:01,371 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T03:50:01,371 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:01,372 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38717 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-22T03:50:01,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38717 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T03:50:01,373 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T03:50:01,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741835_1011 (size=381) 2024-11-22T03:50:01,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741835_1011 (size=381) 2024-11-22T03:50:01,382 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b6129dd3d19b786934def7fda16374cd, NAME => 'TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a 2024-11-22T03:50:01,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741836_1012 (size=64) 2024-11-22T03:50:01,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741836_1012 (size=64) 2024-11-22T03:50:01,388 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:50:01,388 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing b6129dd3d19b786934def7fda16374cd, disabling compactions & flushes 2024-11-22T03:50:01,388 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. 2024-11-22T03:50:01,388 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. 2024-11-22T03:50:01,388 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. after waiting 0 ms 2024-11-22T03:50:01,388 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. 2024-11-22T03:50:01,388 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. 2024-11-22T03:50:01,388 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for b6129dd3d19b786934def7fda16374cd: Waiting for close lock at 1732247401388Disabling compacts and flushes for region at 1732247401388Disabling writes for close at 1732247401388Writing region close event to WAL at 1732247401388Closed at 1732247401388 2024-11-22T03:50:01,389 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T03:50:01,390 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732247401389"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732247401389"}]},"ts":"1732247401389"} 2024-11-22T03:50:01,392 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
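At this point CreateTableProcedure pid=4 has written the single region of TestLogRolling-testLogRolling to hbase:meta and will assign it next. A client-side sketch of the createTable call that starts such a procedure, reduced to the 'info' family with VERSIONS => 1 seen in the logged descriptor; the connection details are assumed, not taken from this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testLogRolling");
        TableDescriptor td = TableDescriptorBuilder.newBuilder(name)
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)
                .build())
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Admin.createTable blocks until the create-table procedure finishes,
          // i.e. the region has been added to hbase:meta and assigned.
          admin.createTable(td);
          System.out.println("available: " + admin.isTableAvailable(name));
        }
      }
    }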
2024-11-22T03:50:01,394 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T03:50:01,394 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732247401394"}]},"ts":"1732247401394"} 2024-11-22T03:50:01,396 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-22T03:50:01,397 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6129dd3d19b786934def7fda16374cd, ASSIGN}] 2024-11-22T03:50:01,399 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6129dd3d19b786934def7fda16374cd, ASSIGN 2024-11-22T03:50:01,400 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6129dd3d19b786934def7fda16374cd, ASSIGN; state=OFFLINE, location=c85114ed5096,34883,1732247400539; forceNewPlan=false, retain=false 2024-11-22T03:50:01,551 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b6129dd3d19b786934def7fda16374cd, regionState=OPENING, regionLocation=c85114ed5096,34883,1732247400539 2024-11-22T03:50:01,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6129dd3d19b786934def7fda16374cd, ASSIGN because future has completed 2024-11-22T03:50:01,557 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b6129dd3d19b786934def7fda16374cd, server=c85114ed5096,34883,1732247400539}] 2024-11-22T03:50:01,717 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. 
2024-11-22T03:50:01,718 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b6129dd3d19b786934def7fda16374cd, NAME => 'TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:50:01,718 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:01,718 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:50:01,719 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:01,719 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:01,722 INFO [StoreOpener-b6129dd3d19b786934def7fda16374cd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:01,725 INFO [StoreOpener-b6129dd3d19b786934def7fda16374cd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b6129dd3d19b786934def7fda16374cd columnFamilyName info 2024-11-22T03:50:01,725 DEBUG [StoreOpener-b6129dd3d19b786934def7fda16374cd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:01,726 INFO [StoreOpener-b6129dd3d19b786934def7fda16374cd-1 {}] regionserver.HStore(327): Store=b6129dd3d19b786934def7fda16374cd/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:50:01,726 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:01,728 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:01,728 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:01,729 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:01,729 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:01,731 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:01,733 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:50:01,734 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b6129dd3d19b786934def7fda16374cd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=707967, jitterRate=-0.09977427124977112}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T03:50:01,734 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:01,734 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b6129dd3d19b786934def7fda16374cd: Running coprocessor pre-open hook at 1732247401719Writing region info on filesystem at 1732247401719Initializing all the Stores at 1732247401721 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247401721Cleaning up temporary data from old regions at 1732247401729 (+8 ms)Running coprocessor post-open hooks at 1732247401734 (+5 ms)Region opened successfully at 1732247401734 2024-11-22T03:50:01,736 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd., pid=6, masterSystemTime=1732247401712 2024-11-22T03:50:01,738 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. 
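The RecoverLeaseFSUtils warnings that resume below come from a Close-WAL-Writer thread still retrying against WALs of an apparently earlier mini-cluster instance (note the older namenode port 43749 and test-data directory), and they fail because that DFSClient has already been shut down, hence the "Filesystem closed" cause. A sketch of the underlying HDFS lease-recovery calls on a live FileSystem handle; the URI and WAL path are placeholders, not the paths from this run:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecovery {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder namenode URI and WAL path.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
        Path wal = new Path("/hbase/WALs/example-server/example.wal");
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // recoverLease asks the NameNode to close the file once the previous
          // writer's lease can be taken over; isFileClosed confirms completion.
          boolean recovered = dfs.recoverLease(wal);
          while (!recovered && !dfs.isFileClosed(wal)) {
            Thread.sleep(1000); // simple retry, analogous in spirit to RecoverLeaseFSUtils' loop
            recovered = dfs.recoverLease(wal);
          }
          System.out.println("file closed: " + dfs.isFileClosed(wal));
        }
        fs.close();
      }
    }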
2024-11-22T03:50:01,738 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. 2024-11-22T03:50:01,739 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b6129dd3d19b786934def7fda16374cd, regionState=OPEN, openSeqNum=2, regionLocation=c85114ed5096,34883,1732247400539 2024-11-22T03:50:01,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b6129dd3d19b786934def7fda16374cd, server=c85114ed5096,34883,1732247400539 because future has completed 2024-11-22T03:50:01,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T03:50:01,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b6129dd3d19b786934def7fda16374cd, server=c85114ed5096,34883,1732247400539 in 186 msec 2024-11-22T03:50:01,747 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T03:50:01,747 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6129dd3d19b786934def7fda16374cd, ASSIGN in 348 msec 2024-11-22T03:50:01,748 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T03:50:01,749 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732247401748"}]},"ts":"1732247401748"} 2024-11-22T03:50:01,751 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-22T03:50:01,753 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T03:50:01,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 385 msec 2024-11-22T03:50:01,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:01,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:02,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:02,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:03,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:03,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:04,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,442 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:04,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:04,947 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:50:04,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,952 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,981 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,981 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,981 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:04,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:05,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:50:05,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:06,776 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T03:50:06,777 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-22T03:50:06,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:06,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:07,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:07,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:08,749 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-22T03:50:08,749 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-22T03:50:08,750 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T03:50:08,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:08,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:09,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:09,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:10,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:10,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-22T03:50:11,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38717 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-22T03:50:11,457 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-22T03:50:11,457 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-22T03:50:11,459 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-22T03:50:11,459 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.
2024-11-22T03:50:11,462 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd., hostname=c85114ed5096,34883,1732247400539, seqNum=2]
2024-11-22T03:50:11,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on b6129dd3d19b786934def7fda16374cd
2024-11-22T03:50:11,478 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b6129dd3d19b786934def7fda16374cd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-22T03:50:11,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/b2540d87d4fd42ddb9bb3ad0bfe55105 is 1080, key is row0001/info:/1732247411463/Put/seqid=0
2024-11-22T03:50:11,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741837_1013 (size=12509)
2024-11-22T03:50:11,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741837_1013 (size=12509)
2024-11-22T03:50:11,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b6129dd3d19b786934def7fda16374cd, server=c85114ed5096,34883,1732247400539
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-22T03:50:11,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:36400 deadline: 1732247421527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b6129dd3d19b786934def7fda16374cd, server=c85114ed5096,34883,1732247400539
2024-11-22T03:50:11,532 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd., hostname=c85114ed5096,34883,1732247400539, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd., hostname=c85114ed5096,34883,1732247400539, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b6129dd3d19b786934def7fda16374cd, server=c85114ed5096,34883,1732247400539
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-22T03:50:11,533 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd., hostname=c85114ed5096,34883,1732247400539, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b6129dd3d19b786934def7fda16374cd, server=c85114ed5096,34883,1732247400539
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-22T03:50:11,533 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd., hostname=c85114ed5096,34883,1732247400539, seqNum=2 because the exception is null or not the one we care about
2024-11-22T03:50:11,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T03:50:11,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:11,902 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/b2540d87d4fd42ddb9bb3ad0bfe55105 2024-11-22T03:50:11,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/b2540d87d4fd42ddb9bb3ad0bfe55105 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b2540d87d4fd42ddb9bb3ad0bfe55105 2024-11-22T03:50:11,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b2540d87d4fd42ddb9bb3ad0bfe55105, entries=7, sequenceid=11, filesize=12.2 K 2024-11-22T03:50:11,921 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for b6129dd3d19b786934def7fda16374cd in 443ms, sequenceid=11, compaction requested=false 2024-11-22T03:50:11,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b6129dd3d19b786934def7fda16374cd: 2024-11-22T03:50:12,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:12,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:13,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:50:13,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:14,253 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:50:14,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:14,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:14,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:14,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:14,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T03:50:14,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T03:50:14,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T03:50:14,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T03:50:14,286 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T03:50:14,286 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T03:50:14,286 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T03:50:14,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T03:50:14,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T03:50:14,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T03:50:14,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T03:50:14,291 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T03:50:14,291 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T03:50:14,293 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T03:50:14,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:14,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:15,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:15,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:16,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:16,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:17,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:17,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:18,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:18,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:19,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:19,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:20,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:20,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-22T03:50:21,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on b6129dd3d19b786934def7fda16374cd
2024-11-22T03:50:21,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b6129dd3d19b786934def7fda16374cd 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-11-22T03:50:21,616 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/5a7966336b2149cda9414878ae4c3769 is 1080, key is row0008/info:/1732247411480/Put/seqid=0
2024-11-22T03:50:21,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741838_1014 (size=29761)
2024-11-22T03:50:21,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741838_1014 (size=29761)
2024-11-22T03:50:21,623 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/5a7966336b2149cda9414878ae4c3769
2024-11-22T03:50:21,630 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/5a7966336b2149cda9414878ae4c3769 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/5a7966336b2149cda9414878ae4c3769
2024-11-22T03:50:21,637 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/5a7966336b2149cda9414878ae4c3769, entries=23, sequenceid=37, filesize=29.1 K
2024-11-22T03:50:21,638 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for b6129dd3d19b786934def7fda16374cd in 29ms, sequenceid=37, compaction requested=false
2024-11-22T03:50:21,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b6129dd3d19b786934def7fda16374cd:
2024-11-22T03:50:21,638 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K
2024-11-22T03:50:21,638 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-22T03:50:21,638 DEBUG
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/5a7966336b2149cda9414878ae4c3769 because midkey is the same as first or last row
2024-11-22T03:50:21,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T03:50:21,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T03:50:22,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T03:50:22,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-22T03:50:23,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:23,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b6129dd3d19b786934def7fda16374cd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:50:23,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/4f3876a88757432d9fbb03a91608b5c8 is 1080, key is row0031/info:/1732247421610/Put/seqid=0 2024-11-22T03:50:23,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741839_1015 (size=12509) 2024-11-22T03:50:23,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741839_1015 (size=12509) 2024-11-22T03:50:23,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/4f3876a88757432d9fbb03a91608b5c8 2024-11-22T03:50:23,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/4f3876a88757432d9fbb03a91608b5c8 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/4f3876a88757432d9fbb03a91608b5c8 2024-11-22T03:50:23,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/4f3876a88757432d9fbb03a91608b5c8, entries=7, sequenceid=47, filesize=12.2 K 2024-11-22T03:50:23,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for b6129dd3d19b786934def7fda16374cd in 25ms, sequenceid=47, compaction requested=true 2024-11-22T03:50:23,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b6129dd3d19b786934def7fda16374cd: 2024-11-22T03:50:23,656 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-22T03:50:23,656 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:50:23,656 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/5a7966336b2149cda9414878ae4c3769 because midkey is the same as first or last row 2024-11-22T03:50:23,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b6129dd3d19b786934def7fda16374cd:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-22T03:50:23,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:23,657 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:50:23,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:23,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b6129dd3d19b786934def7fda16374cd 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-22T03:50:23,658 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:50:23,658 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1541): b6129dd3d19b786934def7fda16374cd/info is initiating minor compaction (all files) 2024-11-22T03:50:23,658 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b6129dd3d19b786934def7fda16374cd/info in TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. 2024-11-22T03:50:23,658 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b2540d87d4fd42ddb9bb3ad0bfe55105, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/5a7966336b2149cda9414878ae4c3769, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/4f3876a88757432d9fbb03a91608b5c8] into tmpdir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp, totalSize=53.5 K 2024-11-22T03:50:23,658 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting b2540d87d4fd42ddb9bb3ad0bfe55105, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732247411463 2024-11-22T03:50:23,659 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5a7966336b2149cda9414878ae4c3769, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732247411480 2024-11-22T03:50:23,659 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4f3876a88757432d9fbb03a91608b5c8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732247421610 2024-11-22T03:50:23,661 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/43b1091cdef64aefa4931107c8198a32 is 1080, key is row0038/info:/1732247423633/Put/seqid=0 
2024-11-22T03:50:23,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741840_1016 (size=16817) 2024-11-22T03:50:23,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741840_1016 (size=16817) 2024-11-22T03:50:23,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=61 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/43b1091cdef64aefa4931107c8198a32 2024-11-22T03:50:23,673 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b6129dd3d19b786934def7fda16374cd#info#compaction#59 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:50:23,674 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/ec7c77368efb45d4a3e09364730a1c48 is 1080, key is row0001/info:/1732247411463/Put/seqid=0 2024-11-22T03:50:23,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/43b1091cdef64aefa4931107c8198a32 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/43b1091cdef64aefa4931107c8198a32 2024-11-22T03:50:23,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741841_1017 (size=44978) 2024-11-22T03:50:23,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741841_1017 (size=44978) 2024-11-22T03:50:23,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/43b1091cdef64aefa4931107c8198a32, entries=11, sequenceid=61, filesize=16.4 K 2024-11-22T03:50:23,683 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for b6129dd3d19b786934def7fda16374cd in 26ms, sequenceid=61, compaction requested=false 2024-11-22T03:50:23,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b6129dd3d19b786934def7fda16374cd: 2024-11-22T03:50:23,683 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.9 K, sizeToCheck=16.0 K 2024-11-22T03:50:23,683 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:50:23,684 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/5a7966336b2149cda9414878ae4c3769 because midkey is the same as first or last row 2024-11-22T03:50:23,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:23,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b6129dd3d19b786934def7fda16374cd 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-22T03:50:23,687 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/ec7c77368efb45d4a3e09364730a1c48 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/ec7c77368efb45d4a3e09364730a1c48 2024-11-22T03:50:23,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/cb32f18b6aeb4f119d509913b7f4a703 is 1080, key is row0049/info:/1732247423659/Put/seqid=0 2024-11-22T03:50:23,694 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b6129dd3d19b786934def7fda16374cd/info of b6129dd3d19b786934def7fda16374cd into ec7c77368efb45d4a3e09364730a1c48(size=43.9 K), total size for store is 60.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T03:50:23,694 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b6129dd3d19b786934def7fda16374cd: 2024-11-22T03:50:23,694 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd., storeName=b6129dd3d19b786934def7fda16374cd/info, priority=13, startTime=1732247423656; duration=0sec 2024-11-22T03:50:23,694 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-11-22T03:50:23,694 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:50:23,694 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/ec7c77368efb45d4a3e09364730a1c48 because midkey is the same as first or last row 2024-11-22T03:50:23,695 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-11-22T03:50:23,695 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:50:23,695 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/ec7c77368efb45d4a3e09364730a1c48 because midkey is the same as first or last row 2024-11-22T03:50:23,695 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-11-22T03:50:23,695 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:50:23,695 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/ec7c77368efb45d4a3e09364730a1c48 because midkey is the same as first or last row 2024-11-22T03:50:23,695 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:23,695 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b6129dd3d19b786934def7fda16374cd:info 2024-11-22T03:50:23,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741842_1018 (size=16817) 2024-11-22T03:50:23,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741842_1018 (size=16817) 2024-11-22T03:50:23,699 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/cb32f18b6aeb4f119d509913b7f4a703 2024-11-22T03:50:23,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/cb32f18b6aeb4f119d509913b7f4a703 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/cb32f18b6aeb4f119d509913b7f4a703 2024-11-22T03:50:23,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/cb32f18b6aeb4f119d509913b7f4a703, entries=11, sequenceid=75, filesize=16.4 K 2024-11-22T03:50:23,710 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=5.25 KB/5380 for b6129dd3d19b786934def7fda16374cd in 24ms, sequenceid=75, compaction requested=true 2024-11-22T03:50:23,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b6129dd3d19b786934def7fda16374cd: 2024-11-22T03:50:23,710 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=76.8 K, sizeToCheck=16.0 K 2024-11-22T03:50:23,710 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:50:23,711 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/ec7c77368efb45d4a3e09364730a1c48 because midkey is the same as first or last row 2024-11-22T03:50:23,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b6129dd3d19b786934def7fda16374cd:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:50:23,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:23,711 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:50:23,712 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 78612 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:50:23,712 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1541): b6129dd3d19b786934def7fda16374cd/info is initiating minor compaction (all files) 2024-11-22T03:50:23,712 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b6129dd3d19b786934def7fda16374cd/info in TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. 
2024-11-22T03:50:23,712 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/ec7c77368efb45d4a3e09364730a1c48, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/43b1091cdef64aefa4931107c8198a32, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/cb32f18b6aeb4f119d509913b7f4a703] into tmpdir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp, totalSize=76.8 K 2024-11-22T03:50:23,713 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting ec7c77368efb45d4a3e09364730a1c48, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732247411463 2024-11-22T03:50:23,713 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting 43b1091cdef64aefa4931107c8198a32, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=61, earliestPutTs=1732247423633 2024-11-22T03:50:23,713 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting cb32f18b6aeb4f119d509913b7f4a703, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732247423659 2024-11-22T03:50:23,727 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b6129dd3d19b786934def7fda16374cd#info#compaction#61 average throughput is 30.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:50:23,727 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/de52b2a8a18b42588c486035216d8297 is 1080, key is row0001/info:/1732247411463/Put/seqid=0 2024-11-22T03:50:23,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741843_1019 (size=68843) 2024-11-22T03:50:23,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741843_1019 (size=68843) 2024-11-22T03:50:23,737 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/de52b2a8a18b42588c486035216d8297 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/de52b2a8a18b42588c486035216d8297 2024-11-22T03:50:23,743 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b6129dd3d19b786934def7fda16374cd/info of b6129dd3d19b786934def7fda16374cd into de52b2a8a18b42588c486035216d8297(size=67.2 K), total size for store is 67.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:50:23,743 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b6129dd3d19b786934def7fda16374cd: 2024-11-22T03:50:23,743 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd., storeName=b6129dd3d19b786934def7fda16374cd/info, priority=13, startTime=1732247423711; duration=0sec 2024-11-22T03:50:23,744 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.2 K, sizeToCheck=16.0 K 2024-11-22T03:50:23,744 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:50:23,744 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/de52b2a8a18b42588c486035216d8297 because midkey is the same as first or last row 2024-11-22T03:50:23,744 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.2 K, sizeToCheck=16.0 K 2024-11-22T03:50:23,744 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:50:23,744 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/de52b2a8a18b42588c486035216d8297 because midkey is the same as first or last row
2024-11-22T03:50:23,744 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.2 K, sizeToCheck=16.0 K
2024-11-22T03:50:23,744 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-22T03:50:23,744 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/de52b2a8a18b42588c486035216d8297 because midkey is the same as first or last row
2024-11-22T03:50:23,744 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-22T03:50:23,744 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b6129dd3d19b786934def7fda16374cd:info
2024-11-22T03:50:23,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T03:50:23,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T03:50:24,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T03:50:24,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:25,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:25,707 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b6129dd3d19b786934def7fda16374cd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:50:25,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/398a31f08d0c43be8a1982405e038e6c is 1080, key is row0060/info:/1732247423687/Put/seqid=0 2024-11-22T03:50:25,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741844_1020 (size=12509) 2024-11-22T03:50:25,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741844_1020 (size=12509) 2024-11-22T03:50:25,719 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/398a31f08d0c43be8a1982405e038e6c 2024-11-22T03:50:25,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/398a31f08d0c43be8a1982405e038e6c as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/398a31f08d0c43be8a1982405e038e6c 2024-11-22T03:50:25,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/398a31f08d0c43be8a1982405e038e6c, entries=7, sequenceid=87, filesize=12.2 K 2024-11-22T03:50:25,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for b6129dd3d19b786934def7fda16374cd in 24ms, sequenceid=87, compaction requested=false 2024-11-22T03:50:25,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b6129dd3d19b786934def7fda16374cd: 2024-11-22T03:50:25,732 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=79.4 K, sizeToCheck=16.0 K 2024-11-22T03:50:25,732 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:50:25,732 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/de52b2a8a18b42588c486035216d8297 because midkey is the same as first or last row 2024-11-22T03:50:25,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:25,733 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b6129dd3d19b786934def7fda16374cd 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-22T03:50:25,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/b93e45e282d5419eb75acecda33b02d2 is 1080, key is row0067/info:/1732247425709/Put/seqid=0 2024-11-22T03:50:25,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741845_1021 (size=18987) 2024-11-22T03:50:25,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741845_1021 (size=18987) 2024-11-22T03:50:25,743 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/b93e45e282d5419eb75acecda33b02d2 2024-11-22T03:50:25,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/b93e45e282d5419eb75acecda33b02d2 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b93e45e282d5419eb75acecda33b02d2 2024-11-22T03:50:25,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b93e45e282d5419eb75acecda33b02d2, entries=13, sequenceid=103, filesize=18.5 K 2024-11-22T03:50:25,756 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=11.56 KB/11836 for b6129dd3d19b786934def7fda16374cd in 23ms, sequenceid=103, compaction requested=true 2024-11-22T03:50:25,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b6129dd3d19b786934def7fda16374cd: 2024-11-22T03:50:25,757 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=98.0 K, sizeToCheck=16.0 K 2024-11-22T03:50:25,757 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:50:25,757 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/de52b2a8a18b42588c486035216d8297 because midkey is the same as first or last row 2024-11-22T03:50:25,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b6129dd3d19b786934def7fda16374cd:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:50:25,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:25,757 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:50:25,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:25,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b6129dd3d19b786934def7fda16374cd 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T03:50:25,758 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 100339 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:50:25,758 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1541): b6129dd3d19b786934def7fda16374cd/info is initiating minor compaction (all files) 2024-11-22T03:50:25,758 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b6129dd3d19b786934def7fda16374cd/info in TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. 
2024-11-22T03:50:25,758 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/de52b2a8a18b42588c486035216d8297, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/398a31f08d0c43be8a1982405e038e6c, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b93e45e282d5419eb75acecda33b02d2] into tmpdir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp, totalSize=98.0 K 2024-11-22T03:50:25,759 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting de52b2a8a18b42588c486035216d8297, keycount=59, bloomtype=ROW, size=67.2 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732247411463 2024-11-22T03:50:25,759 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting 398a31f08d0c43be8a1982405e038e6c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732247423687 2024-11-22T03:50:25,760 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting b93e45e282d5419eb75acecda33b02d2, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1732247425709 2024-11-22T03:50:25,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/f037071249a5479286f188f35c05929f is 1080, key is row0080/info:/1732247425734/Put/seqid=0 2024-11-22T03:50:25,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741846_1022 (size=17894) 2024-11-22T03:50:25,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741846_1022 (size=17894) 2024-11-22T03:50:25,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/f037071249a5479286f188f35c05929f 2024-11-22T03:50:25,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/f037071249a5479286f188f35c05929f as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/f037071249a5479286f188f35c05929f 2024-11-22T03:50:25,775 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b6129dd3d19b786934def7fda16374cd#info#compaction#65 average throughput is 20.27 MB/second, slept 0 time(s) and total slept time 
is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:50:25,776 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/a3463ad954df421a89db9d14a815ce51 is 1080, key is row0001/info:/1732247411463/Put/seqid=0 2024-11-22T03:50:25,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741847_1023 (size=90562) 2024-11-22T03:50:25,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741847_1023 (size=90562) 2024-11-22T03:50:25,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/f037071249a5479286f188f35c05929f, entries=12, sequenceid=118, filesize=17.5 K 2024-11-22T03:50:25,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=5.25 KB/5380 for b6129dd3d19b786934def7fda16374cd in 24ms, sequenceid=118, compaction requested=false 2024-11-22T03:50:25,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b6129dd3d19b786934def7fda16374cd: 2024-11-22T03:50:25,782 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=115.5 K, sizeToCheck=16.0 K 2024-11-22T03:50:25,782 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:50:25,782 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/de52b2a8a18b42588c486035216d8297 because midkey is the same as first or last row 2024-11-22T03:50:25,786 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/a3463ad954df421a89db9d14a815ce51 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/a3463ad954df421a89db9d14a815ce51 2024-11-22T03:50:25,792 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b6129dd3d19b786934def7fda16374cd/info of b6129dd3d19b786934def7fda16374cd into a3463ad954df421a89db9d14a815ce51(size=88.4 K), total size for store is 105.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T03:50:25,792 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b6129dd3d19b786934def7fda16374cd: 2024-11-22T03:50:25,792 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd., storeName=b6129dd3d19b786934def7fda16374cd/info, priority=13, startTime=1732247425757; duration=0sec 2024-11-22T03:50:25,792 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=105.9 K, sizeToCheck=16.0 K 2024-11-22T03:50:25,792 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:50:25,792 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=105.9 K, sizeToCheck=16.0 K 2024-11-22T03:50:25,792 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:50:25,792 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=105.9 K, sizeToCheck=16.0 K 2024-11-22T03:50:25,792 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T03:50:25,793 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:25,793 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:25,793 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b6129dd3d19b786934def7fda16374cd:info 2024-11-22T03:50:25,794 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38717 {}] assignment.AssignmentManager(1363): Split request from c85114ed5096,34883,1732247400539, parent={ENCODED => b6129dd3d19b786934def7fda16374cd, NAME => 'TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-22T03:50:25,799 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38717 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=c85114ed5096,34883,1732247400539 2024-11-22T03:50:25,802 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38717 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6129dd3d19b786934def7fda16374cd, daughterA=112c809438819710f42ad8e377df0d19, daughterB=37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:25,803 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6129dd3d19b786934def7fda16374cd, 
daughterA=112c809438819710f42ad8e377df0d19, daughterB=37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:25,804 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6129dd3d19b786934def7fda16374cd, daughterA=112c809438819710f42ad8e377df0d19, daughterB=37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:25,804 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6129dd3d19b786934def7fda16374cd, daughterA=112c809438819710f42ad8e377df0d19, daughterB=37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:25,812 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6129dd3d19b786934def7fda16374cd, UNASSIGN}] 2024-11-22T03:50:25,814 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6129dd3d19b786934def7fda16374cd, UNASSIGN 2024-11-22T03:50:25,815 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=b6129dd3d19b786934def7fda16374cd, regionState=CLOSING, regionLocation=c85114ed5096,34883,1732247400539 2024-11-22T03:50:25,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6129dd3d19b786934def7fda16374cd, UNASSIGN because future has completed 2024-11-22T03:50:25,818 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-22T03:50:25,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure b6129dd3d19b786934def7fda16374cd, server=c85114ed5096,34883,1732247400539}] 2024-11-22T03:50:25,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:25,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:25,975 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:25,975 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-22T03:50:25,976 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing b6129dd3d19b786934def7fda16374cd, disabling compactions & flushes 2024-11-22T03:50:25,976 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. 2024-11-22T03:50:25,976 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. 2024-11-22T03:50:25,976 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. after waiting 0 ms 2024-11-22T03:50:25,976 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. 
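The two WARN stack traces above come from RecoverLeaseFSUtils reflectively probing DistributedFileSystem#isFileClosed against a DFSClient that has already been shut down, hence the "Filesystem closed" IOException. A minimal sketch of the same probe done directly rather than via reflection (the class and method names below are illustrative, not the HBase utility itself):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Hypothetical helper; HBase does the equivalent via reflection in RecoverLeaseFSUtils.
final class WalCloseProbe {
  static boolean isWalClosed(Configuration conf, String walUri) throws IOException {
    Path wal = new Path(walUri);
    FileSystem fs = wal.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // no lease recovery needed outside HDFS
    }
    // Asks the NameNode whether the file's last block is finalized. If the
    // underlying DFSClient was already closed, this throws
    // IOException("Filesystem closed") -- the failure captured in the WARNs above.
    return ((DistributedFileSystem) fs).isFileClosed(wal);
  }
}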
2024-11-22T03:50:25,976 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing b6129dd3d19b786934def7fda16374cd 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-11-22T03:50:25,981 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/b21df3768daf46118b38401587ae8143 is 1080, key is row0092/info:/1732247425759/Put/seqid=0 2024-11-22T03:50:25,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741848_1024 (size=10347) 2024-11-22T03:50:25,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741848_1024 (size=10347) 2024-11-22T03:50:25,986 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/b21df3768daf46118b38401587ae8143 2024-11-22T03:50:25,992 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/.tmp/info/b21df3768daf46118b38401587ae8143 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b21df3768daf46118b38401587ae8143 2024-11-22T03:50:25,997 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b21df3768daf46118b38401587ae8143, entries=5, sequenceid=127, filesize=10.1 K 2024-11-22T03:50:25,998 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for b6129dd3d19b786934def7fda16374cd in 22ms, sequenceid=127, compaction requested=true 2024-11-22T03:50:26,000 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b2540d87d4fd42ddb9bb3ad0bfe55105, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/5a7966336b2149cda9414878ae4c3769, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/ec7c77368efb45d4a3e09364730a1c48, 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/4f3876a88757432d9fbb03a91608b5c8, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/43b1091cdef64aefa4931107c8198a32, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/de52b2a8a18b42588c486035216d8297, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/cb32f18b6aeb4f119d509913b7f4a703, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/398a31f08d0c43be8a1982405e038e6c, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b93e45e282d5419eb75acecda33b02d2] to archive 2024-11-22T03:50:26,001 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T03:50:26,003 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b2540d87d4fd42ddb9bb3ad0bfe55105 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b2540d87d4fd42ddb9bb3ad0bfe55105 2024-11-22T03:50:26,004 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/5a7966336b2149cda9414878ae4c3769 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/5a7966336b2149cda9414878ae4c3769 2024-11-22T03:50:26,006 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/ec7c77368efb45d4a3e09364730a1c48 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/ec7c77368efb45d4a3e09364730a1c48 2024-11-22T03:50:26,007 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/4f3876a88757432d9fbb03a91608b5c8 to 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/4f3876a88757432d9fbb03a91608b5c8 2024-11-22T03:50:26,009 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/43b1091cdef64aefa4931107c8198a32 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/43b1091cdef64aefa4931107c8198a32 2024-11-22T03:50:26,010 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/de52b2a8a18b42588c486035216d8297 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/de52b2a8a18b42588c486035216d8297 2024-11-22T03:50:26,013 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/cb32f18b6aeb4f119d509913b7f4a703 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/cb32f18b6aeb4f119d509913b7f4a703 2024-11-22T03:50:26,018 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/398a31f08d0c43be8a1982405e038e6c to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/398a31f08d0c43be8a1982405e038e6c 2024-11-22T03:50:26,019 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b93e45e282d5419eb75acecda33b02d2 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b93e45e282d5419eb75acecda33b02d2 2024-11-22T03:50:26,028 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 
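The HFileArchiver lines above move each compacted store file from the table's data directory to the parallel archive directory while preserving the table/region/family layout. A tiny path-mapping sketch of that convention (illustrative only; the actual move is performed by backup.HFileArchiver):

import org.apache.hadoop.fs.Path;

// Illustrative mapping only:
//   <root>/data/<ns>/<table>/<region>/<cf>/<hfile>
//   -> <root>/archive/data/<ns>/<table>/<region>/<cf>/<hfile>
final class ArchivePathSketch {
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String root = rootDir.toUri().getPath();
    String file = storeFile.toUri().getPath();
    if (!file.startsWith(root + "/data/")) {
      throw new IllegalArgumentException("store file is not under " + root + "/data/");
    }
    String relative = file.substring(root.length() + 1); // keeps the leading "data/"
    return new Path(new Path(rootDir, "archive"), relative);
  }
}

Applied to any of the b2540d87... style store-file paths above, this yields exactly the archive/data/... destination the log reports.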
2024-11-22T03:50:26,029 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. 2024-11-22T03:50:26,030 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for b6129dd3d19b786934def7fda16374cd: Waiting for close lock at 1732247425976Running coprocessor pre-close hooks at 1732247425976Disabling compacts and flushes for region at 1732247425976Disabling writes for close at 1732247425976Obtaining lock to block concurrent updates at 1732247425976Preparing flush snapshotting stores in b6129dd3d19b786934def7fda16374cd at 1732247425976Finished memstore snapshotting TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd., syncing WAL and waiting on mvcc, flushsize=dataSize=5380, getHeapSize=6000, getOffHeapSize=0, getCellsCount=5 at 1732247425976Flushing stores of TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. at 1732247425977 (+1 ms)Flushing b6129dd3d19b786934def7fda16374cd/info: creating writer at 1732247425977Flushing b6129dd3d19b786934def7fda16374cd/info: appending metadata at 1732247425980 (+3 ms)Flushing b6129dd3d19b786934def7fda16374cd/info: closing flushed file at 1732247425980Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3dbaf255: reopening flushed file at 1732247425991 (+11 ms)Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for b6129dd3d19b786934def7fda16374cd in 22ms, sequenceid=127, compaction requested=true at 1732247425998 (+7 ms)Writing region close event to WAL at 1732247426022 (+24 ms)Running coprocessor post-close hooks at 1732247426029 (+7 ms)Closed at 1732247426029 2024-11-22T03:50:26,033 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:26,034 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=b6129dd3d19b786934def7fda16374cd, regionState=CLOSED 2024-11-22T03:50:26,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure b6129dd3d19b786934def7fda16374cd, server=c85114ed5096,34883,1732247400539 because future has completed 2024-11-22T03:50:26,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-22T03:50:26,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure b6129dd3d19b786934def7fda16374cd, server=c85114ed5096,34883,1732247400539 in 220 msec 2024-11-22T03:50:26,042 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-22T03:50:26,043 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6129dd3d19b786934def7fda16374cd, UNASSIGN in 228 msec 2024-11-22T03:50:26,051 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:26,054 INFO [PEWorker-4 {}] 
assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=b6129dd3d19b786934def7fda16374cd, threads=3 2024-11-22T03:50:26,056 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b21df3768daf46118b38401587ae8143 for region: b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:26,056 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/a3463ad954df421a89db9d14a815ce51 for region: b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:26,056 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/f037071249a5479286f188f35c05929f for region: b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:26,070 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/f037071249a5479286f188f35c05929f, top=true 2024-11-22T03:50:26,080 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b21df3768daf46118b38401587ae8143, top=true 2024-11-22T03:50:26,081 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-f037071249a5479286f188f35c05929f for child: 37356075a74a1f471bfb82cfc4617ba2, parent: b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:26,082 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/f037071249a5479286f188f35c05929f for region: b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:26,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741849_1025 (size=27) 2024-11-22T03:50:26,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741849_1025 (size=27) 2024-11-22T03:50:26,088 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-b21df3768daf46118b38401587ae8143 for child: 
37356075a74a1f471bfb82cfc4617ba2, parent: b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:26,088 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/b21df3768daf46118b38401587ae8143 for region: b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:26,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741850_1026 (size=27) 2024-11-22T03:50:26,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741850_1026 (size=27) 2024-11-22T03:50:26,098 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/a3463ad954df421a89db9d14a815ce51 for region: b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:26,100 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region b6129dd3d19b786934def7fda16374cd Daughter A: [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/112c809438819710f42ad8e377df0d19/info/a3463ad954df421a89db9d14a815ce51.b6129dd3d19b786934def7fda16374cd] storefiles, Daughter B: [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/a3463ad954df421a89db9d14a815ce51.b6129dd3d19b786934def7fda16374cd, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-b21df3768daf46118b38401587ae8143, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-f037071249a5479286f188f35c05929f] storefiles. 
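Two naming patterns show up in the daughter regions above: a plain reference file named <hfile>.<parentEncodedRegion> (daughter A's a3463ad954df421a89db9d14a815ce51.b6129dd3d19b786934def7fda16374cd, pointing at the bottom half) and an HFileLink-style name <table>=<parentEncodedRegion>-<hfile> (daughter B's files, created because top=true). A hedged, illustrative parser for those two patterns (not the HBase StoreFileInfo/HFileLink code itself):

// Illustrative-only parsing of the two daughter store-file naming patterns above.
final class DaughterFileName {
  final String table;        // null for a plain reference file
  final String parentRegion; // encoded name of the split parent
  final String hfile;        // original store file name in the parent

  private DaughterFileName(String table, String parentRegion, String hfile) {
    this.table = table;
    this.parentRegion = parentRegion;
    this.hfile = hfile;
  }

  static DaughterFileName parse(String name) {
    int eq = name.indexOf('=');
    if (eq >= 0) {
      // Link style: <table>=<parentRegion>-<hfile>
      int dash = name.indexOf('-', eq + 1);
      return new DaughterFileName(name.substring(0, eq),
          name.substring(eq + 1, dash), name.substring(dash + 1));
    }
    // Reference style: <hfile>.<parentRegion>
    int dot = name.lastIndexOf('.');
    return new DaughterFileName(null, name.substring(dot + 1), name.substring(0, dot));
  }
}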
2024-11-22T03:50:26,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741851_1027 (size=71) 2024-11-22T03:50:26,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741851_1027 (size=71) 2024-11-22T03:50:26,113 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:26,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741852_1028 (size=71) 2024-11-22T03:50:26,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741852_1028 (size=71) 2024-11-22T03:50:26,127 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:26,138 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/112c809438819710f42ad8e377df0d19/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-22T03:50:26,141 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-22T03:50:26,144 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732247426143"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732247426143"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732247426143"}]},"ts":"1732247426143"} 2024-11-22T03:50:26,144 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732247426143"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732247426143"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732247426143"}]},"ts":"1732247426143"} 2024-11-22T03:50:26,144 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732247426143"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732247426143"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732247426143"}]},"ts":"1732247426143"} 2024-11-22T03:50:26,167 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=112c809438819710f42ad8e377df0d19, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=37356075a74a1f471bfb82cfc4617ba2, ASSIGN}] 2024-11-22T03:50:26,168 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=112c809438819710f42ad8e377df0d19, ASSIGN 2024-11-22T03:50:26,169 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=37356075a74a1f471bfb82cfc4617ba2, ASSIGN 2024-11-22T03:50:26,170 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=37356075a74a1f471bfb82cfc4617ba2, ASSIGN; state=SPLITTING_NEW, location=c85114ed5096,34883,1732247400539; forceNewPlan=false, retain=false 2024-11-22T03:50:26,170 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=112c809438819710f42ad8e377df0d19, ASSIGN; state=SPLITTING_NEW, location=c85114ed5096,34883,1732247400539; forceNewPlan=false, retain=false 2024-11-22T03:50:26,321 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=37356075a74a1f471bfb82cfc4617ba2, regionState=OPENING, regionLocation=c85114ed5096,34883,1732247400539 2024-11-22T03:50:26,321 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=112c809438819710f42ad8e377df0d19, regionState=OPENING, regionLocation=c85114ed5096,34883,1732247400539 2024-11-22T03:50:26,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=112c809438819710f42ad8e377df0d19, ASSIGN because future has completed 2024-11-22T03:50:26,324 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 112c809438819710f42ad8e377df0d19, server=c85114ed5096,34883,1732247400539}] 2024-11-22T03:50:26,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=37356075a74a1f471bfb82cfc4617ba2, ASSIGN because future has completed 2024-11-22T03:50:26,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 37356075a74a1f471bfb82cfc4617ba2, server=c85114ed5096,34883,1732247400539}] 2024-11-22T03:50:26,479 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19. 
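Everything from the UNASSIGN of the parent through the ASSIGN of the two daughters above is driven by the split request logged earlier (splitKey=row0062), which in this test came from the region server's own post-compaction check. A client can request the same split explicitly; a minimal sketch, assuming connection settings come from the usual hbase-site.xml on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestSplit {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to split the region containing row0062 at exactly that key;
      // the master then runs the same SplitTableRegionProcedure seen above.
      admin.split(TableName.valueOf("TestLogRolling-testLogRolling"), Bytes.toBytes("row0062"));
    }
  }
}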
2024-11-22T03:50:26,480 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 112c809438819710f42ad8e377df0d19, NAME => 'TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-22T03:50:26,480 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 112c809438819710f42ad8e377df0d19 2024-11-22T03:50:26,480 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:50:26,480 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 112c809438819710f42ad8e377df0d19 2024-11-22T03:50:26,480 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 112c809438819710f42ad8e377df0d19 2024-11-22T03:50:26,482 INFO [StoreOpener-112c809438819710f42ad8e377df0d19-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 112c809438819710f42ad8e377df0d19 2024-11-22T03:50:26,483 INFO [StoreOpener-112c809438819710f42ad8e377df0d19-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 112c809438819710f42ad8e377df0d19 columnFamilyName info 2024-11-22T03:50:26,483 DEBUG [StoreOpener-112c809438819710f42ad8e377df0d19-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:26,498 DEBUG [StoreOpener-112c809438819710f42ad8e377df0d19-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/112c809438819710f42ad8e377df0d19/info/a3463ad954df421a89db9d14a815ce51.b6129dd3d19b786934def7fda16374cd->hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/a3463ad954df421a89db9d14a815ce51-bottom 2024-11-22T03:50:26,499 INFO [StoreOpener-112c809438819710f42ad8e377df0d19-1 {}] regionserver.HStore(327): Store=112c809438819710f42ad8e377df0d19/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:50:26,499 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 112c809438819710f42ad8e377df0d19 2024-11-22T03:50:26,500 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/112c809438819710f42ad8e377df0d19 2024-11-22T03:50:26,501 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/112c809438819710f42ad8e377df0d19 2024-11-22T03:50:26,502 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 112c809438819710f42ad8e377df0d19 2024-11-22T03:50:26,502 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 112c809438819710f42ad8e377df0d19 2024-11-22T03:50:26,504 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 112c809438819710f42ad8e377df0d19 2024-11-22T03:50:26,505 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 112c809438819710f42ad8e377df0d19; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689870, jitterRate=-0.122785285115242}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T03:50:26,505 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 112c809438819710f42ad8e377df0d19 2024-11-22T03:50:26,506 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 112c809438819710f42ad8e377df0d19: Running coprocessor pre-open hook at 1732247426480Writing region info on filesystem at 1732247426480Initializing all the Stores at 1732247426481 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247426481Cleaning up temporary data from old regions at 1732247426502 (+21 ms)Running coprocessor post-open hooks at 1732247426505 (+3 ms)Region opened successfully at 1732247426506 (+1 ms) 2024-11-22T03:50:26,507 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19., pid=12, masterSystemTime=1732247426476 2024-11-22T03:50:26,507 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 
112c809438819710f42ad8e377df0d19:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:50:26,507 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-22T03:50:26,507 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:26,508 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19. 2024-11-22T03:50:26,508 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1541): 112c809438819710f42ad8e377df0d19/info is initiating minor compaction (all files) 2024-11-22T03:50:26,508 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 112c809438819710f42ad8e377df0d19/info in TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19. 2024-11-22T03:50:26,508 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/112c809438819710f42ad8e377df0d19/info/a3463ad954df421a89db9d14a815ce51.b6129dd3d19b786934def7fda16374cd->hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/a3463ad954df421a89db9d14a815ce51-bottom] into tmpdir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/112c809438819710f42ad8e377df0d19/.tmp, totalSize=88.4 K 2024-11-22T03:50:26,509 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting a3463ad954df421a89db9d14a815ce51.b6129dd3d19b786934def7fda16374cd, keycount=39, bloomtype=ROW, size=88.4 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1732247411463 2024-11-22T03:50:26,510 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19. 2024-11-22T03:50:26,511 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19. 2024-11-22T03:50:26,511 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 
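The SteppingSplitPolicy parameters printed when daughter A opened (initialSize=16384 inside IncreasingToUpperBoundRegionSplitPolicy) also explain the earlier "sizeToCheck=16.0 K" decision on the parent: with one region of the table on the server, the threshold is roughly min(desiredMaxFileSize, initialSize × count³) = min(desiredMaxFileSize, 16384 × 1³) = 16 KB, which 105.9 K of store data easily exceeds. A hedged sketch of that check, based on my reading of the policy rather than the HBase class itself:

import java.util.List;

// Hedged sketch of the size check behind "Should split because region size is big
// enough sumSize=105.9 K, sizeToCheck=16.0 K"; illustrative, not the HBase class.
final class SplitCheckSketch {
  static boolean shouldSplit(List<Long> storeFileSizes, long desiredMaxFileSize,
                             long initialSize, int regionsWithCommonTable) {
    long threshold = regionsWithCommonTable == 0
        ? desiredMaxFileSize
        : Math.min(desiredMaxFileSize,
            initialSize * regionsWithCommonTable * regionsWithCommonTable
                * regionsWithCommonTable);
    long sum = storeFileSizes.stream().mapToLong(Long::longValue).sum();
    // With initialSize=16384 and one region, threshold = 16 KB, so 105.9 K splits.
    return sum > threshold;
  }
}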
2024-11-22T03:50:26,511 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=112c809438819710f42ad8e377df0d19, regionState=OPEN, openSeqNum=131, regionLocation=c85114ed5096,34883,1732247400539 2024-11-22T03:50:26,511 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 37356075a74a1f471bfb82cfc4617ba2, NAME => 'TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-22T03:50:26,511 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:26,511 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:50:26,511 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:26,511 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:26,513 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-22T03:50:26,513 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
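The hbase:meta flush being prepared above persists the regioninfo/splitA/splitB rows written a moment earlier, and that metadata is what clients read to find the new daughters. A minimal sketch of listing the post-split boundaries from a client (same table name assumed; the output formatting is mine):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ListDaughters {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(
             TableName.valueOf("TestLogRolling-testLogRolling"))) {
      // After the split above this prints two regions: [''..row0062) and [row0062..'').
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " ["
            + Bytes.toStringBinary(loc.getRegion().getStartKey()) + ", "
            + Bytes.toStringBinary(loc.getRegion().getEndKey()) + ")");
      }
    }
  }
}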
2024-11-22T03:50:26,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-22T03:50:26,513 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 112c809438819710f42ad8e377df0d19, server=c85114ed5096,34883,1732247400539 because future has completed 2024-11-22T03:50:26,516 INFO [StoreOpener-37356075a74a1f471bfb82cfc4617ba2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:26,518 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-22T03:50:26,518 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 112c809438819710f42ad8e377df0d19, server=c85114ed5096,34883,1732247400539 in 192 msec 2024-11-22T03:50:26,520 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=112c809438819710f42ad8e377df0d19, ASSIGN in 351 msec 2024-11-22T03:50:26,522 INFO [StoreOpener-37356075a74a1f471bfb82cfc4617ba2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 37356075a74a1f471bfb82cfc4617ba2 columnFamilyName info 2024-11-22T03:50:26,522 DEBUG [StoreOpener-37356075a74a1f471bfb82cfc4617ba2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:26,535 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 112c809438819710f42ad8e377df0d19#info#compaction#67 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:50:26,536 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/112c809438819710f42ad8e377df0d19/.tmp/info/0a80ef55f3074bcc816506c7a69b046c is 1080, key is row0001/info:/1732247411463/Put/seqid=0 2024-11-22T03:50:26,544 DEBUG [StoreOpener-37356075a74a1f471bfb82cfc4617ba2-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-b21df3768daf46118b38401587ae8143 2024-11-22T03:50:26,548 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/.tmp/info/12212980ce4241df99f571057d6e8beb is 193, key is TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2./info:regioninfo/1732247426320/Put/seqid=0 2024-11-22T03:50:26,553 DEBUG [StoreOpener-37356075a74a1f471bfb82cfc4617ba2-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-f037071249a5479286f188f35c05929f 2024-11-22T03:50:26,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741853_1029 (size=70862) 2024-11-22T03:50:26,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741853_1029 (size=70862) 2024-11-22T03:50:26,559 DEBUG [StoreOpener-37356075a74a1f471bfb82cfc4617ba2-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/a3463ad954df421a89db9d14a815ce51.b6129dd3d19b786934def7fda16374cd->hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/a3463ad954df421a89db9d14a815ce51-top 2024-11-22T03:50:26,560 INFO [StoreOpener-37356075a74a1f471bfb82cfc4617ba2-1 {}] regionserver.HStore(327): Store=37356075a74a1f471bfb82cfc4617ba2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:50:26,560 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:26,561 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:26,562 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:26,562 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:26,562 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:26,564 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:26,565 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 37356075a74a1f471bfb82cfc4617ba2; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=844770, jitterRate=0.07418185472488403}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T03:50:26,565 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:26,565 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 37356075a74a1f471bfb82cfc4617ba2: Running coprocessor pre-open hook at 1732247426512Writing region info on filesystem at 1732247426512Initializing all the Stores at 1732247426514 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247426514Cleaning up temporary data from old regions at 1732247426562 (+48 ms)Running coprocessor post-open hooks at 1732247426565 (+3 ms)Region opened successfully at 1732247426565 2024-11-22T03:50:26,566 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/112c809438819710f42ad8e377df0d19/.tmp/info/0a80ef55f3074bcc816506c7a69b046c as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/112c809438819710f42ad8e377df0d19/info/0a80ef55f3074bcc816506c7a69b046c 2024-11-22T03:50:26,567 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2., pid=13, masterSystemTime=1732247426476 2024-11-22T03:50:26,567 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 37356075a74a1f471bfb82cfc4617ba2:info, priority=-2147483648, current under compaction store size is 2 2024-11-22T03:50:26,567 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small 
Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:26,567 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:50:26,569 INFO [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 2024-11-22T03:50:26,569 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.HStore(1541): 37356075a74a1f471bfb82cfc4617ba2/info is initiating minor compaction (all files) 2024-11-22T03:50:26,569 INFO [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 37356075a74a1f471bfb82cfc4617ba2/info in TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 2024-11-22T03:50:26,569 INFO [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/a3463ad954df421a89db9d14a815ce51.b6129dd3d19b786934def7fda16374cd->hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/a3463ad954df421a89db9d14a815ce51-top, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-f037071249a5479286f188f35c05929f, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-b21df3768daf46118b38401587ae8143] into tmpdir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp, totalSize=116.0 K 2024-11-22T03:50:26,570 DEBUG [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 2024-11-22T03:50:26,570 INFO [RS_OPEN_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 
2024-11-22T03:50:26,571 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] compactions.Compactor(225): Compacting a3463ad954df421a89db9d14a815ce51.b6129dd3d19b786934def7fda16374cd, keycount=39, bloomtype=ROW, size=88.4 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1732247411463
2024-11-22T03:50:26,571 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=37356075a74a1f471bfb82cfc4617ba2, regionState=OPEN, openSeqNum=131, regionLocation=c85114ed5096,34883,1732247400539
2024-11-22T03:50:26,572 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-f037071249a5479286f188f35c05929f, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732247425734
2024-11-22T03:50:26,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 37356075a74a1f471bfb82cfc4617ba2, server=c85114ed5096,34883,1732247400539 because future has completed
2024-11-22T03:50:26,573 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-b21df3768daf46118b38401587ae8143, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732247425759
2024-11-22T03:50:26,579 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 112c809438819710f42ad8e377df0d19/info of 112c809438819710f42ad8e377df0d19 into 0a80ef55f3074bcc816506c7a69b046c(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T03:50:26,579 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 112c809438819710f42ad8e377df0d19:
2024-11-22T03:50:26,579 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19., storeName=112c809438819710f42ad8e377df0d19/info, priority=15, startTime=1732247426507; duration=0sec
2024-11-22T03:50:26,579 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-22T03:50:26,579 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 112c809438819710f42ad8e377df0d19:info
2024-11-22T03:50:26,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741854_1030 (size=9847)
2024-11-22T03:50:26,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741854_1030 (size=9847)
2024-11-22T03:50:26,596 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/.tmp/info/12212980ce4241df99f571057d6e8beb
2024-11-22T03:50:26,606 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38717 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=c85114ed5096,34883,1732247400539, table=TestLogRolling-testLogRolling, region=37356075a74a1f471bfb82cfc4617ba2. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now.
2024-11-22T03:50:26,610 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11
2024-11-22T03:50:26,610 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 37356075a74a1f471bfb82cfc4617ba2, server=c85114ed5096,34883,1732247400539 in 281 msec
2024-11-22T03:50:26,617 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7
2024-11-22T03:50:26,618 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=37356075a74a1f471bfb82cfc4617ba2, ASSIGN in 444 msec
2024-11-22T03:50:26,619 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6129dd3d19b786934def7fda16374cd, daughterA=112c809438819710f42ad8e377df0d19, daughterB=37356075a74a1f471bfb82cfc4617ba2 in 817 msec
2024-11-22T03:50:26,629 INFO [RS:0;c85114ed5096:34883-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 37356075a74a1f471bfb82cfc4617ba2#info#compaction#69 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-22T03:50:26,630 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/b026bdbefd7b4a3baf19005f5367a24f is 1080, key is row0062/info:/1732247423692/Put/seqid=0
2024-11-22T03:50:26,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741855_1031 (size=42984)
2024-11-22T03:50:26,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741855_1031 (size=42984)
2024-11-22T03:50:26,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/.tmp/ns/d2fe8981a9f7421e91e4d64868cb3879 is 43, key is default/ns:d/1732247401326/Put/seqid=0
2024-11-22T03:50:26,656 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/b026bdbefd7b4a3baf19005f5367a24f as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/b026bdbefd7b4a3baf19005f5367a24f
2024-11-22T03:50:26,662 INFO [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 37356075a74a1f471bfb82cfc4617ba2/info of 37356075a74a1f471bfb82cfc4617ba2 into b026bdbefd7b4a3baf19005f5367a24f(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T03:50:26,663 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:26,663 INFO [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2., storeName=37356075a74a1f471bfb82cfc4617ba2/info, priority=13, startTime=1732247426567; duration=0sec 2024-11-22T03:50:26,663 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:26,663 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 37356075a74a1f471bfb82cfc4617ba2:info 2024-11-22T03:50:26,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741856_1032 (size=5153) 2024-11-22T03:50:26,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741856_1032 (size=5153) 2024-11-22T03:50:26,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/.tmp/ns/d2fe8981a9f7421e91e4d64868cb3879 2024-11-22T03:50:26,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/.tmp/table/622de32dceae4e6a84f88bae4a61e9e0 is 65, key is TestLogRolling-testLogRolling/table:state/1732247401748/Put/seqid=0 2024-11-22T03:50:26,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741857_1033 (size=5340) 2024-11-22T03:50:26,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741857_1033 (size=5340) 2024-11-22T03:50:26,728 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/.tmp/table/622de32dceae4e6a84f88bae4a61e9e0 2024-11-22T03:50:26,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/.tmp/info/12212980ce4241df99f571057d6e8beb as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/info/12212980ce4241df99f571057d6e8beb 2024-11-22T03:50:26,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/info/12212980ce4241df99f571057d6e8beb, entries=30, sequenceid=17, filesize=9.6 K 2024-11-22T03:50:26,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/.tmp/ns/d2fe8981a9f7421e91e4d64868cb3879 as 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/ns/d2fe8981a9f7421e91e4d64868cb3879 2024-11-22T03:50:26,764 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/ns/d2fe8981a9f7421e91e4d64868cb3879, entries=2, sequenceid=17, filesize=5.0 K 2024-11-22T03:50:26,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/.tmp/table/622de32dceae4e6a84f88bae4a61e9e0 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/table/622de32dceae4e6a84f88bae4a61e9e0 2024-11-22T03:50:26,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/table/622de32dceae4e6a84f88bae4a61e9e0, entries=2, sequenceid=17, filesize=5.2 K 2024-11-22T03:50:26,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 264ms, sequenceid=17, compaction requested=false 2024-11-22T03:50:26,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-22T03:50:26,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:26,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:27,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:36400 deadline: 1732247437774, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. 
is not online on c85114ed5096,34883,1732247400539 2024-11-22T03:50:27,775 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd., hostname=c85114ed5096,34883,1732247400539, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd., hostname=c85114ed5096,34883,1732247400539, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. is not online on c85114ed5096,34883,1732247400539 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T03:50:27,775 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd., hostname=c85114ed5096,34883,1732247400539, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd. is not online on c85114ed5096,34883,1732247400539 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T03:50:27,775 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732247401368.b6129dd3d19b786934def7fda16374cd., hostname=c85114ed5096,34883,1732247400539, seqNum=2 from cache 2024-11-22T03:50:27,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:27,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:28,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:28,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:29,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:29,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:30,476 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T03:50:30,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:30,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:31,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,032 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,032 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,033 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,033 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,035 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,036 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,062 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,062 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,062 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,576 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T03:50:31,577 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,611 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T03:50:31,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:31,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:32,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:32,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:33,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:33,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:34,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:34,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:35,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:35,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:36,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:36,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:37,898 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2., hostname=c85114ed5096,34883,1732247400539, seqNum=131] 2024-11-22T03:50:37,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:37,910 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:50:37,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/847ac45abf924eccaadbce51ce10741a is 1080, key is row0097/info:/1732247437900/Put/seqid=0 2024-11-22T03:50:37,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:37,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:37,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741858_1034 (size=12516) 2024-11-22T03:50:37,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741858_1034 (size=12516) 2024-11-22T03:50:37,920 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/847ac45abf924eccaadbce51ce10741a 2024-11-22T03:50:37,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/847ac45abf924eccaadbce51ce10741a as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/847ac45abf924eccaadbce51ce10741a 2024-11-22T03:50:37,932 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/847ac45abf924eccaadbce51ce10741a, entries=7, sequenceid=141, filesize=12.2 K 2024-11-22T03:50:37,933 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 37356075a74a1f471bfb82cfc4617ba2 in 23ms, sequenceid=141, compaction requested=false 2024-11-22T03:50:37,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:37,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:37,951 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-22T03:50:37,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/a896de17c28143d38249a001ae4c5174 is 1080, key is row0104/info:/1732247437911/Put/seqid=0 2024-11-22T03:50:37,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741859_1035 (size=19000) 2024-11-22T03:50:37,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741859_1035 (size=19000) 
2024-11-22T03:50:37,962 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/a896de17c28143d38249a001ae4c5174 2024-11-22T03:50:37,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/a896de17c28143d38249a001ae4c5174 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/a896de17c28143d38249a001ae4c5174 2024-11-22T03:50:37,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/a896de17c28143d38249a001ae4c5174, entries=13, sequenceid=157, filesize=18.6 K 2024-11-22T03:50:37,979 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=10.51 KB/10760 for 37356075a74a1f471bfb82cfc4617ba2 in 28ms, sequenceid=157, compaction requested=true 2024-11-22T03:50:37,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:37,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 37356075a74a1f471bfb82cfc4617ba2:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:50:37,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:37,979 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:50:37,981 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74500 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:50:37,981 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1541): 37356075a74a1f471bfb82cfc4617ba2/info is initiating minor compaction (all files) 2024-11-22T03:50:37,981 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 37356075a74a1f471bfb82cfc4617ba2/info in TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 
2024-11-22T03:50:37,981 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/b026bdbefd7b4a3baf19005f5367a24f, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/847ac45abf924eccaadbce51ce10741a, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/a896de17c28143d38249a001ae4c5174] into tmpdir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp, totalSize=72.8 K 2024-11-22T03:50:37,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:37,981 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting b026bdbefd7b4a3baf19005f5367a24f, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732247423692 2024-11-22T03:50:37,982 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T03:50:37,982 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting 847ac45abf924eccaadbce51ce10741a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1732247437900 2024-11-22T03:50:37,982 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting a896de17c28143d38249a001ae4c5174, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732247437911 2024-11-22T03:50:37,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/7449a8cd9aba4fd795cc515040a9f907 is 1080, key is row0117/info:/1732247437953/Put/seqid=0 2024-11-22T03:50:38,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741860_1036 (size=17906) 2024-11-22T03:50:38,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741860_1036 (size=17906) 2024-11-22T03:50:38,002 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/7449a8cd9aba4fd795cc515040a9f907 2024-11-22T03:50:38,002 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 37356075a74a1f471bfb82cfc4617ba2#info#compaction#75 average throughput is 56.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:50:38,003 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/4c5307d179be49a0b90d50c475a2c7ec is 1080, key is row0062/info:/1732247423692/Put/seqid=0 2024-11-22T03:50:38,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741861_1037 (size=64714) 2024-11-22T03:50:38,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741861_1037 (size=64714) 2024-11-22T03:50:38,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/7449a8cd9aba4fd795cc515040a9f907 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7449a8cd9aba4fd795cc515040a9f907 2024-11-22T03:50:38,014 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7449a8cd9aba4fd795cc515040a9f907, entries=12, sequenceid=172, filesize=17.5 K 2024-11-22T03:50:38,014 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/4c5307d179be49a0b90d50c475a2c7ec as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/4c5307d179be49a0b90d50c475a2c7ec 2024-11-22T03:50:38,015 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=0 B/0 for 37356075a74a1f471bfb82cfc4617ba2 in 33ms, sequenceid=172, compaction requested=false 2024-11-22T03:50:38,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:38,021 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 37356075a74a1f471bfb82cfc4617ba2/info of 37356075a74a1f471bfb82cfc4617ba2 into 4c5307d179be49a0b90d50c475a2c7ec(size=63.2 K), total size for store is 80.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T03:50:38,021 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:38,021 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2., storeName=37356075a74a1f471bfb82cfc4617ba2/info, priority=13, startTime=1732247437979; duration=0sec 2024-11-22T03:50:38,021 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:38,021 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 37356075a74a1f471bfb82cfc4617ba2:info 2024-11-22T03:50:38,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:50:38,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:39,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:39,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:50:40,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:40,007 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:50:40,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/48704d47f08b46c8a8bb72ab538e25df is 1080, key is row0129/info:/1732247439984/Put/seqid=0 2024-11-22T03:50:40,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741862_1038 (size=12516) 2024-11-22T03:50:40,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741862_1038 (size=12516) 2024-11-22T03:50:40,017 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/48704d47f08b46c8a8bb72ab538e25df 2024-11-22T03:50:40,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/48704d47f08b46c8a8bb72ab538e25df as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/48704d47f08b46c8a8bb72ab538e25df 2024-11-22T03:50:40,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/48704d47f08b46c8a8bb72ab538e25df, entries=7, sequenceid=183, filesize=12.2 K 2024-11-22T03:50:40,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 37356075a74a1f471bfb82cfc4617ba2 in 21ms, sequenceid=183, compaction requested=true 2024-11-22T03:50:40,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:40,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 37356075a74a1f471bfb82cfc4617ba2:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:50:40,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:40,029 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:50:40,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:40,029 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-22T03:50:40,030 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 95136 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:50:40,030 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1541): 37356075a74a1f471bfb82cfc4617ba2/info is initiating minor compaction (all files) 2024-11-22T03:50:40,030 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 37356075a74a1f471bfb82cfc4617ba2/info in TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 2024-11-22T03:50:40,030 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/4c5307d179be49a0b90d50c475a2c7ec, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7449a8cd9aba4fd795cc515040a9f907, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/48704d47f08b46c8a8bb72ab538e25df] into tmpdir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp, totalSize=92.9 K 2024-11-22T03:50:40,031 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4c5307d179be49a0b90d50c475a2c7ec, keycount=55, bloomtype=ROW, size=63.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732247423692 2024-11-22T03:50:40,031 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7449a8cd9aba4fd795cc515040a9f907, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732247437953 2024-11-22T03:50:40,031 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting 48704d47f08b46c8a8bb72ab538e25df, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732247439984 2024-11-22T03:50:40,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/55aa215ad22c4fd78465328309c5da05 is 1080, key is row0136/info:/1732247440008/Put/seqid=0 2024-11-22T03:50:40,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741863_1039 (size=16828) 2024-11-22T03:50:40,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741863_1039 (size=16828) 2024-11-22T03:50:40,040 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=197 (bloomFilter=true), 
to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/55aa215ad22c4fd78465328309c5da05 2024-11-22T03:50:40,043 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 37356075a74a1f471bfb82cfc4617ba2#info#compaction#78 average throughput is 37.97 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:50:40,044 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/d18c83bfa097486294d9fa3a1b971772 is 1080, key is row0062/info:/1732247423692/Put/seqid=0 2024-11-22T03:50:40,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/55aa215ad22c4fd78465328309c5da05 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/55aa215ad22c4fd78465328309c5da05 2024-11-22T03:50:40,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741864_1040 (size=85371) 2024-11-22T03:50:40,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741864_1040 (size=85371) 2024-11-22T03:50:40,052 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/55aa215ad22c4fd78465328309c5da05, entries=11, sequenceid=197, filesize=16.4 K 2024-11-22T03:50:40,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for 37356075a74a1f471bfb82cfc4617ba2 in 24ms, sequenceid=197, compaction requested=false 2024-11-22T03:50:40,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:40,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:40,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-22T03:50:40,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/1dd7375db6e44b4cb9348a13bf27e41d is 1080, key is row0147/info:/1732247440030/Put/seqid=0 2024-11-22T03:50:40,062 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/d18c83bfa097486294d9fa3a1b971772 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/d18c83bfa097486294d9fa3a1b971772 2024-11-22T03:50:40,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741865_1041 (size=15750) 2024-11-22T03:50:40,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741865_1041 (size=15750) 2024-11-22T03:50:40,067 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/1dd7375db6e44b4cb9348a13bf27e41d 2024-11-22T03:50:40,069 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 37356075a74a1f471bfb82cfc4617ba2/info of 37356075a74a1f471bfb82cfc4617ba2 into d18c83bfa097486294d9fa3a1b971772(size=83.4 K), total size for store is 99.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:50:40,069 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:40,069 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2., storeName=37356075a74a1f471bfb82cfc4617ba2/info, priority=13, startTime=1732247440029; duration=0sec 2024-11-22T03:50:40,069 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:40,069 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 37356075a74a1f471bfb82cfc4617ba2:info 2024-11-22T03:50:40,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/1dd7375db6e44b4cb9348a13bf27e41d as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/1dd7375db6e44b4cb9348a13bf27e41d 2024-11-22T03:50:40,078 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/1dd7375db6e44b4cb9348a13bf27e41d, entries=10, sequenceid=210, filesize=15.4 K 2024-11-22T03:50:40,079 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=4.20 KB/4304 for 37356075a74a1f471bfb82cfc4617ba2 in 22ms, sequenceid=210, compaction requested=true 2024-11-22T03:50:40,079 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:40,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 37356075a74a1f471bfb82cfc4617ba2:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:50:40,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:40,079 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:50:40,081 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 117949 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:50:40,081 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1541): 37356075a74a1f471bfb82cfc4617ba2/info is initiating minor compaction (all files) 2024-11-22T03:50:40,081 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 37356075a74a1f471bfb82cfc4617ba2/info in TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 2024-11-22T03:50:40,081 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/d18c83bfa097486294d9fa3a1b971772, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/55aa215ad22c4fd78465328309c5da05, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/1dd7375db6e44b4cb9348a13bf27e41d] into tmpdir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp, totalSize=115.2 K 2024-11-22T03:50:40,081 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting d18c83bfa097486294d9fa3a1b971772, keycount=74, bloomtype=ROW, size=83.4 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732247423692 2024-11-22T03:50:40,082 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting 55aa215ad22c4fd78465328309c5da05, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732247440008 2024-11-22T03:50:40,082 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1dd7375db6e44b4cb9348a13bf27e41d, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732247440030 2024-11-22T03:50:40,092 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 37356075a74a1f471bfb82cfc4617ba2#info#compaction#80 average throughput is 97.48 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:50:40,093 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/61846958eebe4ce29468fb896f1a2293 is 1080, key is row0062/info:/1732247423692/Put/seqid=0 2024-11-22T03:50:40,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741866_1042 (size=108119) 2024-11-22T03:50:40,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741866_1042 (size=108119) 2024-11-22T03:50:40,101 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/61846958eebe4ce29468fb896f1a2293 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/61846958eebe4ce29468fb896f1a2293 2024-11-22T03:50:40,107 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 37356075a74a1f471bfb82cfc4617ba2/info of 37356075a74a1f471bfb82cfc4617ba2 into 61846958eebe4ce29468fb896f1a2293(size=105.6 K), total size for store is 105.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:50:40,107 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:40,107 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2., storeName=37356075a74a1f471bfb82cfc4617ba2/info, priority=13, startTime=1732247440079; duration=0sec 2024-11-22T03:50:40,107 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:40,108 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 37356075a74a1f471bfb82cfc4617ba2:info 2024-11-22T03:50:40,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:40,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:41,340 INFO [master/c85114ed5096:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T03:50:41,340 INFO [master/c85114ed5096:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-22T03:50:41,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:41,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:50:42,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:42,079 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:50:42,082 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/e5374bffccd343689e42fb271ce031d1 is 1080, key is row0157/info:/1732247440058/Put/seqid=0 2024-11-22T03:50:42,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741867_1043 (size=12516) 2024-11-22T03:50:42,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741867_1043 (size=12516) 2024-11-22T03:50:42,087 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/e5374bffccd343689e42fb271ce031d1 2024-11-22T03:50:42,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/e5374bffccd343689e42fb271ce031d1 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e5374bffccd343689e42fb271ce031d1 2024-11-22T03:50:42,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e5374bffccd343689e42fb271ce031d1, entries=7, sequenceid=222, filesize=12.2 K 2024-11-22T03:50:42,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 37356075a74a1f471bfb82cfc4617ba2 in 20ms, sequenceid=222, compaction requested=false 2024-11-22T03:50:42,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:42,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:42,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T03:50:42,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/e5948f51b41a4ed28bc5cd471d2a7870 is 1080, key is row0164/info:/1732247442080/Put/seqid=0 2024-11-22T03:50:42,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to 
blk_1073741868_1044 (size=17906) 2024-11-22T03:50:42,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741868_1044 (size=17906) 2024-11-22T03:50:42,109 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/e5948f51b41a4ed28bc5cd471d2a7870 2024-11-22T03:50:42,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/e5948f51b41a4ed28bc5cd471d2a7870 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e5948f51b41a4ed28bc5cd471d2a7870 2024-11-22T03:50:42,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e5948f51b41a4ed28bc5cd471d2a7870, entries=12, sequenceid=237, filesize=17.5 K 2024-11-22T03:50:42,121 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 37356075a74a1f471bfb82cfc4617ba2 in 21ms, sequenceid=237, compaction requested=true 2024-11-22T03:50:42,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:42,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 37356075a74a1f471bfb82cfc4617ba2:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:50:42,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:42,121 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:50:42,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:42,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-22T03:50:42,122 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 138541 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:50:42,122 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1541): 37356075a74a1f471bfb82cfc4617ba2/info is initiating minor compaction (all files) 2024-11-22T03:50:42,123 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 37356075a74a1f471bfb82cfc4617ba2/info in TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 
2024-11-22T03:50:42,123 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/61846958eebe4ce29468fb896f1a2293, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e5374bffccd343689e42fb271ce031d1, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e5948f51b41a4ed28bc5cd471d2a7870] into tmpdir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp, totalSize=135.3 K 2024-11-22T03:50:42,123 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting 61846958eebe4ce29468fb896f1a2293, keycount=95, bloomtype=ROW, size=105.6 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732247423692 2024-11-22T03:50:42,123 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting e5374bffccd343689e42fb271ce031d1, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732247440058 2024-11-22T03:50:42,124 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting e5948f51b41a4ed28bc5cd471d2a7870, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732247442080 2024-11-22T03:50:42,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/bb7ffbc1358c4049becadd4c793f949b is 1080, key is row0176/info:/1732247442102/Put/seqid=0 2024-11-22T03:50:42,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741869_1045 (size=16828) 2024-11-22T03:50:42,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741869_1045 (size=16828) 2024-11-22T03:50:42,135 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 37356075a74a1f471bfb82cfc4617ba2#info#compaction#84 average throughput is 58.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:50:42,136 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/7d5d51b14b854dcdacc74e523c1bc409 is 1080, key is row0062/info:/1732247423692/Put/seqid=0 2024-11-22T03:50:42,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741870_1046 (size=128835) 2024-11-22T03:50:42,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741870_1046 (size=128835) 2024-11-22T03:50:42,146 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/7d5d51b14b854dcdacc74e523c1bc409 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7d5d51b14b854dcdacc74e523c1bc409 2024-11-22T03:50:42,152 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 37356075a74a1f471bfb82cfc4617ba2/info of 37356075a74a1f471bfb82cfc4617ba2 into 7d5d51b14b854dcdacc74e523c1bc409(size=125.8 K), total size for store is 125.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:50:42,152 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:42,152 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2., storeName=37356075a74a1f471bfb82cfc4617ba2/info, priority=13, startTime=1732247442121; duration=0sec 2024-11-22T03:50:42,152 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:42,152 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 37356075a74a1f471bfb82cfc4617ba2:info 2024-11-22T03:50:42,533 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/bb7ffbc1358c4049becadd4c793f949b 2024-11-22T03:50:42,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/bb7ffbc1358c4049becadd4c793f949b as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/bb7ffbc1358c4049becadd4c793f949b 2024-11-22T03:50:42,550 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/bb7ffbc1358c4049becadd4c793f949b, entries=11, sequenceid=251, filesize=16.4 K 2024-11-22T03:50:42,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=6.30 KB/6456 for 37356075a74a1f471bfb82cfc4617ba2 in 429ms, sequenceid=251, compaction requested=false 2024-11-22T03:50:42,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:42,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:42,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:43,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:43,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:50:44,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:44,140 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T03:50:44,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/d70ff4916a654d47a10612d1a7c2c0ab is 1080, key is row0187/info:/1732247442123/Put/seqid=0 2024-11-22T03:50:44,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741871_1047 (size=12520) 2024-11-22T03:50:44,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741871_1047 (size=12520) 2024-11-22T03:50:44,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/d70ff4916a654d47a10612d1a7c2c0ab 2024-11-22T03:50:44,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/d70ff4916a654d47a10612d1a7c2c0ab as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/d70ff4916a654d47a10612d1a7c2c0ab 2024-11-22T03:50:44,165 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/d70ff4916a654d47a10612d1a7c2c0ab, entries=7, sequenceid=262, filesize=12.2 K 2024-11-22T03:50:44,166 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 37356075a74a1f471bfb82cfc4617ba2 in 27ms, sequenceid=262, compaction requested=true 2024-11-22T03:50:44,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:44,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 37356075a74a1f471bfb82cfc4617ba2:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:50:44,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:44,166 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:50:44,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:44,167 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T03:50:44,167 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 158183 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:50:44,167 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1541): 37356075a74a1f471bfb82cfc4617ba2/info is initiating minor compaction (all files) 2024-11-22T03:50:44,167 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 37356075a74a1f471bfb82cfc4617ba2/info in TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 2024-11-22T03:50:44,167 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7d5d51b14b854dcdacc74e523c1bc409, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/bb7ffbc1358c4049becadd4c793f949b, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/d70ff4916a654d47a10612d1a7c2c0ab] into tmpdir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp, totalSize=154.5 K 2024-11-22T03:50:44,168 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7d5d51b14b854dcdacc74e523c1bc409, keycount=114, bloomtype=ROW, size=125.8 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732247423692 2024-11-22T03:50:44,168 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting bb7ffbc1358c4049becadd4c793f949b, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732247442102 2024-11-22T03:50:44,168 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting d70ff4916a654d47a10612d1a7c2c0ab, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732247442123 2024-11-22T03:50:44,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/2b052bd7cebe49d88353c0d61616d00a is 1080, key is row0194/info:/1732247444142/Put/seqid=0 2024-11-22T03:50:44,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741872_1048 (size=17918) 2024-11-22T03:50:44,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741872_1048 (size=17918) 2024-11-22T03:50:44,176 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=277 (bloomFilter=true), 
to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/2b052bd7cebe49d88353c0d61616d00a 2024-11-22T03:50:44,181 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 37356075a74a1f471bfb82cfc4617ba2#info#compaction#87 average throughput is 33.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:50:44,181 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/7f8f6cc08df74035afca16a80f5a6cbe is 1080, key is row0062/info:/1732247423692/Put/seqid=0 2024-11-22T03:50:44,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/2b052bd7cebe49d88353c0d61616d00a as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/2b052bd7cebe49d88353c0d61616d00a 2024-11-22T03:50:44,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741873_1049 (size=148418) 2024-11-22T03:50:44,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741873_1049 (size=148418) 2024-11-22T03:50:44,189 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/2b052bd7cebe49d88353c0d61616d00a, entries=12, sequenceid=277, filesize=17.5 K 2024-11-22T03:50:44,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 37356075a74a1f471bfb82cfc4617ba2 in 23ms, sequenceid=277, compaction requested=false 2024-11-22T03:50:44,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:44,192 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/7f8f6cc08df74035afca16a80f5a6cbe as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7f8f6cc08df74035afca16a80f5a6cbe 2024-11-22T03:50:44,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:44,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T03:50:44,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/aa876740ab7c4ab1bd08ccd626636a68 is 1080, key is row0206/info:/1732247444168/Put/seqid=0 2024-11-22T03:50:44,199 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 37356075a74a1f471bfb82cfc4617ba2/info of 37356075a74a1f471bfb82cfc4617ba2 into 7f8f6cc08df74035afca16a80f5a6cbe(size=144.9 K), total size for store is 162.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:50:44,199 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:44,199 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2., storeName=37356075a74a1f471bfb82cfc4617ba2/info, priority=13, startTime=1732247444166; duration=0sec 2024-11-22T03:50:44,199 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:44,199 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 37356075a74a1f471bfb82cfc4617ba2:info 2024-11-22T03:50:44,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741874_1050 (size=17918) 2024-11-22T03:50:44,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741874_1050 (size=17918) 2024-11-22T03:50:44,204 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/aa876740ab7c4ab1bd08ccd626636a68 2024-11-22T03:50:44,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/aa876740ab7c4ab1bd08ccd626636a68 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/aa876740ab7c4ab1bd08ccd626636a68 2024-11-22T03:50:44,215 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/aa876740ab7c4ab1bd08ccd626636a68, entries=12, sequenceid=292, filesize=17.5 K 2024-11-22T03:50:44,216 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for 37356075a74a1f471bfb82cfc4617ba2 in 24ms, sequenceid=292, compaction requested=true 2024-11-22T03:50:44,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:44,216 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 37356075a74a1f471bfb82cfc4617ba2:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:50:44,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:44,216 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:50:44,217 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 184254 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:50:44,217 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1541): 37356075a74a1f471bfb82cfc4617ba2/info is initiating minor compaction (all files) 2024-11-22T03:50:44,217 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 37356075a74a1f471bfb82cfc4617ba2/info in TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 2024-11-22T03:50:44,217 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7f8f6cc08df74035afca16a80f5a6cbe, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/2b052bd7cebe49d88353c0d61616d00a, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/aa876740ab7c4ab1bd08ccd626636a68] into tmpdir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp, totalSize=179.9 K 2024-11-22T03:50:44,218 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7f8f6cc08df74035afca16a80f5a6cbe, keycount=132, bloomtype=ROW, size=144.9 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732247423692 2024-11-22T03:50:44,218 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2b052bd7cebe49d88353c0d61616d00a, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732247444142 2024-11-22T03:50:44,218 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] compactions.Compactor(225): Compacting aa876740ab7c4ab1bd08ccd626636a68, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732247444168 2024-11-22T03:50:44,233 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 37356075a74a1f471bfb82cfc4617ba2#info#compaction#89 average throughput is 53.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:50:44,234 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/e2f15acee07e4f5bb6eda55e1dcc95b1 is 1080, key is row0062/info:/1732247423692/Put/seqid=0 2024-11-22T03:50:44,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741875_1051 (size=174424) 2024-11-22T03:50:44,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741875_1051 (size=174424) 2024-11-22T03:50:44,245 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/e2f15acee07e4f5bb6eda55e1dcc95b1 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e2f15acee07e4f5bb6eda55e1dcc95b1 2024-11-22T03:50:44,251 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 37356075a74a1f471bfb82cfc4617ba2/info of 37356075a74a1f471bfb82cfc4617ba2 into e2f15acee07e4f5bb6eda55e1dcc95b1(size=170.3 K), total size for store is 170.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:50:44,251 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:44,251 INFO [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2., storeName=37356075a74a1f471bfb82cfc4617ba2/info, priority=13, startTime=1732247444216; duration=0sec 2024-11-22T03:50:44,251 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:44,251 DEBUG [RS:0;c85114ed5096:34883-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 37356075a74a1f471bfb82cfc4617ba2:info 2024-11-22T03:50:44,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:44,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:45,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:45,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:46,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:46,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-22T03:50:46,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/e7b7c34ace794106997897796d67f94f is 1080, key is row0218/info:/1732247444194/Put/seqid=0 2024-11-22T03:50:46,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741876_1052 (size=13602) 2024-11-22T03:50:46,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741876_1052 (size=13602) 2024-11-22T03:50:46,227 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/e7b7c34ace794106997897796d67f94f 2024-11-22T03:50:46,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/e7b7c34ace794106997897796d67f94f as 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e7b7c34ace794106997897796d67f94f 2024-11-22T03:50:46,239 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e7b7c34ace794106997897796d67f94f, entries=8, sequenceid=305, filesize=13.3 K 2024-11-22T03:50:46,240 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=13.66 KB/13988 for 37356075a74a1f471bfb82cfc4617ba2 in 29ms, sequenceid=305, compaction requested=false 2024-11-22T03:50:46,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:46,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:46,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-22T03:50:46,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/66a871fd92624203aa45d19f1f1f8973 is 1080, key is row0226/info:/1732247446213/Put/seqid=0 2024-11-22T03:50:46,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741877_1053 (size=20092) 2024-11-22T03:50:46,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741877_1053 (size=20092) 2024-11-22T03:50:46,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/66a871fd92624203aa45d19f1f1f8973 2024-11-22T03:50:46,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/66a871fd92624203aa45d19f1f1f8973 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/66a871fd92624203aa45d19f1f1f8973 2024-11-22T03:50:46,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/66a871fd92624203aa45d19f1f1f8973, entries=14, sequenceid=322, filesize=19.6 K 2024-11-22T03:50:46,266 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 37356075a74a1f471bfb82cfc4617ba2 in 24ms, sequenceid=322, compaction requested=true 2024-11-22T03:50:46,267 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:46,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34883 {}] regionserver.HRegion(8855): Flush requested on 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:46,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 37356075a74a1f471bfb82cfc4617ba2:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T03:50:46,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:46,267 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T03:50:46,267 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-22T03:50:46,268 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 208118 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T03:50:46,268 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.HStore(1541): 37356075a74a1f471bfb82cfc4617ba2/info is initiating minor compaction (all files) 2024-11-22T03:50:46,268 INFO [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 37356075a74a1f471bfb82cfc4617ba2/info in TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 2024-11-22T03:50:46,268 INFO [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e2f15acee07e4f5bb6eda55e1dcc95b1, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e7b7c34ace794106997897796d67f94f, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/66a871fd92624203aa45d19f1f1f8973] into tmpdir=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp, totalSize=203.2 K 2024-11-22T03:50:46,268 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] compactions.Compactor(225): Compacting e2f15acee07e4f5bb6eda55e1dcc95b1, keycount=156, bloomtype=ROW, size=170.3 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732247423692 2024-11-22T03:50:46,269 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] compactions.Compactor(225): Compacting e7b7c34ace794106997897796d67f94f, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732247444194 2024-11-22T03:50:46,269 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] compactions.Compactor(225): Compacting 66a871fd92624203aa45d19f1f1f8973, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732247446213 2024-11-22T03:50:46,271 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): 
Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/8f52698dc5b54a959dd587d1bdeb273c is 1080, key is row0240/info:/1732247446243/Put/seqid=0 2024-11-22T03:50:46,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741878_1054 (size=20092) 2024-11-22T03:50:46,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741878_1054 (size=20092) 2024-11-22T03:50:46,276 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/8f52698dc5b54a959dd587d1bdeb273c 2024-11-22T03:50:46,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/8f52698dc5b54a959dd587d1bdeb273c as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/8f52698dc5b54a959dd587d1bdeb273c 2024-11-22T03:50:46,282 INFO [RS:0;c85114ed5096:34883-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 37356075a74a1f471bfb82cfc4617ba2#info#compaction#93 average throughput is 45.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T03:50:46,283 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/8722689f517f4ce9908a03a8e43b93c6 is 1080, key is row0062/info:/1732247423692/Put/seqid=0 2024-11-22T03:50:46,286 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/8f52698dc5b54a959dd587d1bdeb273c, entries=14, sequenceid=339, filesize=19.6 K 2024-11-22T03:50:46,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741879_1055 (size=198268) 2024-11-22T03:50:46,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741879_1055 (size=198268) 2024-11-22T03:50:46,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=3.15 KB/3228 for 37356075a74a1f471bfb82cfc4617ba2 in 20ms, sequenceid=339, compaction requested=false 2024-11-22T03:50:46,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:46,292 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/8722689f517f4ce9908a03a8e43b93c6 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/8722689f517f4ce9908a03a8e43b93c6 2024-11-22T03:50:46,298 INFO [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 37356075a74a1f471bfb82cfc4617ba2/info of 37356075a74a1f471bfb82cfc4617ba2 into 8722689f517f4ce9908a03a8e43b93c6(size=193.6 K), total size for store is 213.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T03:50:46,298 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:46,298 INFO [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2., storeName=37356075a74a1f471bfb82cfc4617ba2/info, priority=13, startTime=1732247446267; duration=0sec 2024-11-22T03:50:46,298 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T03:50:46,298 DEBUG [RS:0;c85114ed5096:34883-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 37356075a74a1f471bfb82cfc4617ba2:info 2024-11-22T03:50:46,299 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-22T03:50:46,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:46,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:47,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:47,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:48,272 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-22T03:50:48,273 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C34883%2C1732247400539.1732247448273 2024-11-22T03:50:48,278 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:48,278 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:48,278 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:48,278 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:48,278 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:48,279 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/WALs/c85114ed5096,34883,1732247400539/c85114ed5096%2C34883%2C1732247400539.1732247400917 with entries=320, filesize=311.00 KB; new WAL /user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/WALs/c85114ed5096,34883,1732247400539/c85114ed5096%2C34883%2C1732247400539.1732247448273 2024-11-22T03:50:48,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741833_1009 (size=318475) 2024-11-22T03:50:48,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741833_1009 (size=318475) 2024-11-22T03:50:48,288 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36665:36665),(127.0.0.1/127.0.0.1:45473:45473)] 2024-11-22T03:50:48,291 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-22T03:50:48,298 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/.tmp/info/6bb95d38d3ea44c38a3aa689a8d9424c is 193, key is TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2./info:regioninfo/1732247426570/Put/seqid=0 2024-11-22T03:50:48,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741881_1057 (size=6223) 2024-11-22T03:50:48,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741881_1057 (size=6223) 2024-11-22T03:50:48,315 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/.tmp/info/6bb95d38d3ea44c38a3aa689a8d9424c 2024-11-22T03:50:48,322 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/.tmp/info/6bb95d38d3ea44c38a3aa689a8d9424c as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/info/6bb95d38d3ea44c38a3aa689a8d9424c 2024-11-22T03:50:48,327 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/info/6bb95d38d3ea44c38a3aa689a8d9424c, entries=5, sequenceid=21, filesize=6.1 K 2024-11-22T03:50:48,328 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 37ms, sequenceid=21, compaction requested=false 2024-11-22T03:50:48,329 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-22T03:50:48,329 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 112c809438819710f42ad8e377df0d19: 2024-11-22T03:50:48,329 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 37356075a74a1f471bfb82cfc4617ba2 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-22T03:50:48,333 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/cde1e777ade0477bbab7f7b59b093818 is 1080, key is row0254/info:/1732247446268/Put/seqid=0 2024-11-22T03:50:48,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741882_1058 (size=8199) 2024-11-22T03:50:48,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741882_1058 (size=8199) 2024-11-22T03:50:48,343 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/cde1e777ade0477bbab7f7b59b093818 2024-11-22T03:50:48,349 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/.tmp/info/cde1e777ade0477bbab7f7b59b093818 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/cde1e777ade0477bbab7f7b59b093818 2024-11-22T03:50:48,354 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/cde1e777ade0477bbab7f7b59b093818, entries=3, sequenceid=346, filesize=8.0 K 2024-11-22T03:50:48,355 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 37356075a74a1f471bfb82cfc4617ba2 in 26ms, sequenceid=346, compaction requested=true 2024-11-22T03:50:48,355 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
37356075a74a1f471bfb82cfc4617ba2: 2024-11-22T03:50:48,355 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C34883%2C1732247400539.1732247448355 2024-11-22T03:50:48,360 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:48,361 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:48,361 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:48,361 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:48,361 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:48,361 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/WALs/c85114ed5096,34883,1732247400539/c85114ed5096%2C34883%2C1732247400539.1732247448273 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/WALs/c85114ed5096,34883,1732247400539/c85114ed5096%2C34883%2C1732247400539.1732247448355 2024-11-22T03:50:48,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741880_1056 (size=731) 2024-11-22T03:50:48,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741880_1056 (size=731) 2024-11-22T03:50:48,366 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/WALs/c85114ed5096,34883,1732247400539/c85114ed5096%2C34883%2C1732247400539.1732247400917 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/oldWALs/c85114ed5096%2C34883%2C1732247400539.1732247400917 2024-11-22T03:50:48,367 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/WALs/c85114ed5096,34883,1732247400539/c85114ed5096%2C34883%2C1732247400539.1732247448273 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/oldWALs/c85114ed5096%2C34883%2C1732247400539.1732247448273 2024-11-22T03:50:48,369 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36665:36665),(127.0.0.1/127.0.0.1:45473:45473)] 2024-11-22T03:50:48,370 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T03:50:48,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T03:50:48,370 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
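The wal.AbstractFSWAL(987) entries above record each WAL roll together with its entry count and file size ("Rolled WAL ... with entries=320, filesize=311.00 KB; new WAL ..."). As a side note for post-processing a capture like this, here is a minimal Java sketch that pulls those two figures out of such lines; the class name and the sample line are illustrative assumptions, not part of HBase.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrative log-scraping sketch (not HBase code): extract the entry count and
// file size from "Rolled WAL ... with entries=<n>, filesize=<size>; new WAL ..." lines.
public class WalRollStats {
    private static final Pattern ROLLED =
        Pattern.compile("Rolled WAL .* with entries=(\\d+), filesize=([\\d.]+ [KMG]?B)");

    public static void main(String[] args) {
        // Hypothetical input, shaped like the AbstractFSWAL(987) lines in this log.
        String line = "wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/.../x.1732247400917 "
            + "with entries=320, filesize=311.00 KB; new WAL /user/jenkins/.../x.1732247448273";
        Matcher m = ROLLED.matcher(line);
        if (m.find()) {
            System.out.println("entries=" + m.group(1) + ", filesize=" + m.group(2));
        }
    }
}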
2024-11-22T03:50:48,370 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:50:48,370 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:50:48,370 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:50:48,370 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T03:50:48,370 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T03:50:48,370 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1029944757, stopped=false 2024-11-22T03:50:48,370 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c85114ed5096,38717,1732247400498 2024-11-22T03:50:48,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:50:48,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:50:48,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:48,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:48,371 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:50:48,372 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T03:50:48,372 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:50:48,372 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:50:48,372 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:50:48,372 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:50:48,372 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c85114ed5096,34883,1732247400539' ***** 2024-11-22T03:50:48,373 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T03:50:48,373 INFO [RS:0;c85114ed5096:34883 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T03:50:48,373 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T03:50:48,373 INFO [RS:0;c85114ed5096:34883 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T03:50:48,373 INFO [RS:0;c85114ed5096:34883 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
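The ZKWatcher entries above show how the shutdown propagates: the /hbase/running znode is deleted, every watcher receives a NodeDeleted event, and each one then re-sets a watch on the now-missing node ("Set watcher on znode that does not yet exist"). Below is a minimal sketch of that watch-on-deletion pattern using the plain ZooKeeper client API; it illustrates the mechanism visible in the log, not HBase's actual ZKWatcher, and the connect string is a placeholder.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch of the pattern visible in the log: react when the cluster "running"
// znode is deleted, then re-arm the watch so the next transition is seen too.
public class RunningNodeWatcher implements Watcher {
    private static final String RUNNING_ZNODE = "/hbase/running";
    private final ZooKeeper zk;

    public RunningNodeWatcher(String quorum) throws Exception {
        // The log uses quorum 127.0.0.1:49967; any connect string works here.
        this.zk = new ZooKeeper(quorum, 30_000, this);
        zk.exists(RUNNING_ZNODE, true); // set the initial watch
    }

    @Override
    public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeDeleted
                && RUNNING_ZNODE.equals(event.getPath())) {
            System.out.println("Cluster shutdown requested: " + RUNNING_ZNODE + " deleted");
        }
        try {
            // exists() with watch=true also works on a nonexistent node, matching
            // the "Set watcher on znode that does not yet exist" lines above.
            zk.exists(RUNNING_ZNODE, true);
        } catch (Exception e) {
            // Ignore during shutdown.
        }
    }
}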
2024-11-22T03:50:48,373 INFO [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(3091): Received CLOSE for 112c809438819710f42ad8e377df0d19 2024-11-22T03:50:48,373 INFO [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(3091): Received CLOSE for 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:48,373 INFO [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(959): stopping server c85114ed5096,34883,1732247400539 2024-11-22T03:50:48,373 INFO [RS:0;c85114ed5096:34883 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:50:48,373 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 112c809438819710f42ad8e377df0d19, disabling compactions & flushes 2024-11-22T03:50:48,373 INFO [RS:0;c85114ed5096:34883 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c85114ed5096:34883. 2024-11-22T03:50:48,373 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19. 2024-11-22T03:50:48,373 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19. 2024-11-22T03:50:48,373 DEBUG [RS:0;c85114ed5096:34883 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:50:48,373 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19. after waiting 0 ms 2024-11-22T03:50:48,373 DEBUG [RS:0;c85114ed5096:34883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:50:48,373 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19. 2024-11-22T03:50:48,373 INFO [RS:0;c85114ed5096:34883 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-22T03:50:48,373 INFO [RS:0;c85114ed5096:34883 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T03:50:48,373 INFO [RS:0;c85114ed5096:34883 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T03:50:48,373 INFO [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T03:50:48,374 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/112c809438819710f42ad8e377df0d19/info/a3463ad954df421a89db9d14a815ce51.b6129dd3d19b786934def7fda16374cd->hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/a3463ad954df421a89db9d14a815ce51-bottom] to archive 2024-11-22T03:50:48,375 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T03:50:48,376 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/112c809438819710f42ad8e377df0d19/info/a3463ad954df421a89db9d14a815ce51.b6129dd3d19b786934def7fda16374cd to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/112c809438819710f42ad8e377df0d19/info/a3463ad954df421a89db9d14a815ce51.b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:48,376 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c85114ed5096:38717 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-22T03:50:48,377 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-22T03:50:48,379 INFO [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-22T03:50:48,379 DEBUG [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 112c809438819710f42ad8e377df0d19=TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19., 37356075a74a1f471bfb82cfc4617ba2=TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.} 2024-11-22T03:50:48,379 DEBUG [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(1351): Waiting on 112c809438819710f42ad8e377df0d19, 1588230740, 37356075a74a1f471bfb82cfc4617ba2 2024-11-22T03:50:48,379 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:50:48,379 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:50:48,379 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:50:48,379 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:50:48,379 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:50:48,390 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-22T03:50:48,390 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/112c809438819710f42ad8e377df0d19/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-11-22T03:50:48,390 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:50:48,391 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:50:48,391 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19. 
2024-11-22T03:50:48,391 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732247448379Running coprocessor pre-close hooks at 1732247448379Disabling compacts and flushes for region at 1732247448379Disabling writes for close at 1732247448379Writing region close event to WAL at 1732247448382 (+3 ms)Running coprocessor post-close hooks at 1732247448390 (+8 ms)Closed at 1732247448390 2024-11-22T03:50:48,391 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 112c809438819710f42ad8e377df0d19: Waiting for close lock at 1732247448373Running coprocessor pre-close hooks at 1732247448373Disabling compacts and flushes for region at 1732247448373Disabling writes for close at 1732247448373Writing region close event to WAL at 1732247448381 (+8 ms)Running coprocessor post-close hooks at 1732247448390 (+9 ms)Closed at 1732247448390 2024-11-22T03:50:48,391 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T03:50:48,391 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732247425799.112c809438819710f42ad8e377df0d19. 2024-11-22T03:50:48,391 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 37356075a74a1f471bfb82cfc4617ba2, disabling compactions & flushes 2024-11-22T03:50:48,391 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 2024-11-22T03:50:48,391 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 2024-11-22T03:50:48,391 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. after waiting 0 ms 2024-11-22T03:50:48,391 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 
2024-11-22T03:50:48,394 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/a3463ad954df421a89db9d14a815ce51.b6129dd3d19b786934def7fda16374cd->hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/b6129dd3d19b786934def7fda16374cd/info/a3463ad954df421a89db9d14a815ce51-top, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-f037071249a5479286f188f35c05929f, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/b026bdbefd7b4a3baf19005f5367a24f, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-b21df3768daf46118b38401587ae8143, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/847ac45abf924eccaadbce51ce10741a, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/4c5307d179be49a0b90d50c475a2c7ec, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/a896de17c28143d38249a001ae4c5174, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7449a8cd9aba4fd795cc515040a9f907, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/d18c83bfa097486294d9fa3a1b971772, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/48704d47f08b46c8a8bb72ab538e25df, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/55aa215ad22c4fd78465328309c5da05, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/61846958eebe4ce29468fb896f1a2293, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/1dd7375db6e44b4cb9348a13bf27e41d, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e5374bffccd343689e42fb271ce031d1, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7d5d51b14b854dcdacc74e523c1bc409, 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e5948f51b41a4ed28bc5cd471d2a7870, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/bb7ffbc1358c4049becadd4c793f949b, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7f8f6cc08df74035afca16a80f5a6cbe, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/d70ff4916a654d47a10612d1a7c2c0ab, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/2b052bd7cebe49d88353c0d61616d00a, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e2f15acee07e4f5bb6eda55e1dcc95b1, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/aa876740ab7c4ab1bd08ccd626636a68, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e7b7c34ace794106997897796d67f94f, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/66a871fd92624203aa45d19f1f1f8973] to archive 2024-11-22T03:50:48,395 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T03:50:48,397 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/a3463ad954df421a89db9d14a815ce51.b6129dd3d19b786934def7fda16374cd to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/a3463ad954df421a89db9d14a815ce51.b6129dd3d19b786934def7fda16374cd 2024-11-22T03:50:48,398 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-f037071249a5479286f188f35c05929f to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-f037071249a5479286f188f35c05929f 2024-11-22T03:50:48,400 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/b026bdbefd7b4a3baf19005f5367a24f to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/b026bdbefd7b4a3baf19005f5367a24f 2024-11-22T03:50:48,401 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-b21df3768daf46118b38401587ae8143 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/TestLogRolling-testLogRolling=b6129dd3d19b786934def7fda16374cd-b21df3768daf46118b38401587ae8143 2024-11-22T03:50:48,403 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/847ac45abf924eccaadbce51ce10741a to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/847ac45abf924eccaadbce51ce10741a 2024-11-22T03:50:48,405 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/4c5307d179be49a0b90d50c475a2c7ec to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/4c5307d179be49a0b90d50c475a2c7ec 2024-11-22T03:50:48,406 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/a896de17c28143d38249a001ae4c5174 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/a896de17c28143d38249a001ae4c5174 2024-11-22T03:50:48,409 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7449a8cd9aba4fd795cc515040a9f907 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7449a8cd9aba4fd795cc515040a9f907 2024-11-22T03:50:48,411 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/d18c83bfa097486294d9fa3a1b971772 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/d18c83bfa097486294d9fa3a1b971772 2024-11-22T03:50:48,413 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/48704d47f08b46c8a8bb72ab538e25df to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/48704d47f08b46c8a8bb72ab538e25df 2024-11-22T03:50:48,414 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/55aa215ad22c4fd78465328309c5da05 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/55aa215ad22c4fd78465328309c5da05 2024-11-22T03:50:48,416 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/61846958eebe4ce29468fb896f1a2293 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/61846958eebe4ce29468fb896f1a2293 2024-11-22T03:50:48,418 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/1dd7375db6e44b4cb9348a13bf27e41d to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/1dd7375db6e44b4cb9348a13bf27e41d 2024-11-22T03:50:48,419 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e5374bffccd343689e42fb271ce031d1 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e5374bffccd343689e42fb271ce031d1 2024-11-22T03:50:48,421 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7d5d51b14b854dcdacc74e523c1bc409 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7d5d51b14b854dcdacc74e523c1bc409 2024-11-22T03:50:48,422 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e5948f51b41a4ed28bc5cd471d2a7870 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e5948f51b41a4ed28bc5cd471d2a7870 2024-11-22T03:50:48,424 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/bb7ffbc1358c4049becadd4c793f949b to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/bb7ffbc1358c4049becadd4c793f949b 2024-11-22T03:50:48,426 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7f8f6cc08df74035afca16a80f5a6cbe to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/7f8f6cc08df74035afca16a80f5a6cbe 2024-11-22T03:50:48,427 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/d70ff4916a654d47a10612d1a7c2c0ab to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/d70ff4916a654d47a10612d1a7c2c0ab 2024-11-22T03:50:48,428 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/2b052bd7cebe49d88353c0d61616d00a to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/2b052bd7cebe49d88353c0d61616d00a 2024-11-22T03:50:48,430 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e2f15acee07e4f5bb6eda55e1dcc95b1 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e2f15acee07e4f5bb6eda55e1dcc95b1 2024-11-22T03:50:48,431 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/aa876740ab7c4ab1bd08ccd626636a68 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/aa876740ab7c4ab1bd08ccd626636a68 2024-11-22T03:50:48,433 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e7b7c34ace794106997897796d67f94f to 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/e7b7c34ace794106997897796d67f94f 2024-11-22T03:50:48,435 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/66a871fd92624203aa45d19f1f1f8973 to hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/archive/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/info/66a871fd92624203aa45d19f1f1f8973 2024-11-22T03:50:48,435 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [b026bdbefd7b4a3baf19005f5367a24f=42984, 847ac45abf924eccaadbce51ce10741a=12516, 4c5307d179be49a0b90d50c475a2c7ec=64714, a896de17c28143d38249a001ae4c5174=19000, 7449a8cd9aba4fd795cc515040a9f907=17906, d18c83bfa097486294d9fa3a1b971772=85371, 48704d47f08b46c8a8bb72ab538e25df=12516, 55aa215ad22c4fd78465328309c5da05=16828, 61846958eebe4ce29468fb896f1a2293=108119, 1dd7375db6e44b4cb9348a13bf27e41d=15750, e5374bffccd343689e42fb271ce031d1=12516, 7d5d51b14b854dcdacc74e523c1bc409=128835, e5948f51b41a4ed28bc5cd471d2a7870=17906, bb7ffbc1358c4049becadd4c793f949b=16828, 7f8f6cc08df74035afca16a80f5a6cbe=148418, d70ff4916a654d47a10612d1a7c2c0ab=12520, 2b052bd7cebe49d88353c0d61616d00a=17918, e2f15acee07e4f5bb6eda55e1dcc95b1=174424, aa876740ab7c4ab1bd08ccd626636a68=17918, e7b7c34ace794106997897796d67f94f=13602, 66a871fd92624203aa45d19f1f1f8973=20092] 2024-11-22T03:50:48,439 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/data/default/TestLogRolling-testLogRolling/37356075a74a1f471bfb82cfc4617ba2/recovered.edits/349.seqid, newMaxSeqId=349, maxSeqId=130 2024-11-22T03:50:48,440 INFO [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 2024-11-22T03:50:48,440 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 37356075a74a1f471bfb82cfc4617ba2: Waiting for close lock at 1732247448391Running coprocessor pre-close hooks at 1732247448391Disabling compacts and flushes for region at 1732247448391Disabling writes for close at 1732247448391Writing region close event to WAL at 1732247448435 (+44 ms)Running coprocessor post-close hooks at 1732247448440 (+5 ms)Closed at 1732247448440 2024-11-22T03:50:48,440 DEBUG [RS_CLOSE_REGION-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732247425799.37356075a74a1f471bfb82cfc4617ba2. 2024-11-22T03:50:48,579 INFO [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(976): stopping server c85114ed5096,34883,1732247400539; all regions closed. 
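The HFileArchiver records above show compacted store files being moved from the region's data directory into a mirrored archive/ directory rather than deleted outright. Purely as an illustration of that move-to-archive pattern (this is not HBase's internal HFileArchiver code), the sketch below does the equivalent with Hadoop's public FileSystem API; the NameNode address is taken from this log and the abbreviated paths are placeholders.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveStoreFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode endpoint copied from the log above; everything else is illustrative.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41565"), conf);

    // A store file and its mirrored location under archive/ (abbreviated placeholder paths).
    Path src = new Path("/user/jenkins/test-data/.../data/default/TestLogRolling-testLogRolling/"
        + "37356075a74a1f471bfb82cfc4617ba2/info/b026bdbefd7b4a3baf19005f5367a24f");
    Path dst = new Path("/user/jenkins/test-data/.../archive/data/default/TestLogRolling-testLogRolling/"
        + "37356075a74a1f471bfb82cfc4617ba2/info/b026bdbefd7b4a3baf19005f5367a24f");

    // Create the archive directory and move the file instead of deleting it,
    // so the data stays recoverable until a cleaner chore removes it later.
    fs.mkdirs(dst.getParent());
    if (!fs.rename(src, dst)) {
      throw new java.io.IOException("Failed to archive " + src + " to " + dst);
    }
  }
}
```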
2024-11-22T03:50:48,580 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:50:48,580 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:50:48,580 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:50:48,580 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:50:48,580 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:50:48,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741834_1010 (size=8107)
2024-11-22T03:50:48,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741834_1010 (size=8107)
2024-11-22T03:50:48,585 DEBUG [RS:0;c85114ed5096:34883 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/oldWALs
2024-11-22T03:50:48,585 INFO [RS:0;c85114ed5096:34883 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c85114ed5096%2C34883%2C1732247400539.meta:.meta(num 1732247401288)
2024-11-22T03:50:48,585 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:50:48,586 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:50:48,586 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:50:48,586 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:50:48,586 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T03:50:48,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741883_1059 (size=780)
2024-11-22T03:50:48,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741883_1059 (size=780)
2024-11-22T03:50:48,590 DEBUG [RS:0;c85114ed5096:34883 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/oldWALs
2024-11-22T03:50:48,590 INFO [RS:0;c85114ed5096:34883 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c85114ed5096%2C34883%2C1732247400539:(num 1732247448355)
2024-11-22T03:50:48,590 DEBUG [RS:0;c85114ed5096:34883 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T03:50:48,590 INFO [RS:0;c85114ed5096:34883 {}] regionserver.LeaseManager(133): Closed leases
2024-11-22T03:50:48,590 INFO [RS:0;c85114ed5096:34883 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-22T03:50:48,590 INFO [RS:0;c85114ed5096:34883 {}] hbase.ChoreService(370): Chore service for: regionserver/c85114ed5096:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-22T03:50:48,590 INFO [RS:0;c85114ed5096:34883 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-22T03:50:48,591 INFO [RS:0;c85114ed5096:34883 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34883
2024-11-22T03:50:48,591 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-22T03:50:48,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c85114ed5096,34883,1732247400539
2024-11-22T03:50:48,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-22T03:50:48,599 INFO [RS:0;c85114ed5096:34883 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-22T03:50:48,599 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c85114ed5096,34883,1732247400539]
2024-11-22T03:50:48,600 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c85114ed5096,34883,1732247400539 already deleted, retry=false
2024-11-22T03:50:48,600 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c85114ed5096,34883,1732247400539 expired; onlineServers=0
2024-11-22T03:50:48,600 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c85114ed5096,38717,1732247400498' *****
2024-11-22T03:50:48,600 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-22T03:50:48,600 INFO [M:0;c85114ed5096:38717 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-22T03:50:48,600 INFO [M:0;c85114ed5096:38717 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-22T03:50:48,600 DEBUG [M:0;c85114ed5096:38717 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-22T03:50:48,601 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
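The ZKWatcher lines above record the master noticing that the region server's ephemeral znode under /hbase/rs was deleted and treating that as server expiration. The sketch below is a minimal, hypothetical illustration of that watch-and-react pattern using the plain ZooKeeper client API (the quorum address and znode path are copied from the log); it is not the ZKWatcher/RegionServerTracker implementation itself, and the 30-second session timeout is an arbitrary choice.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    final String rsZnode = "/hbase/rs/c85114ed5096,34883,1732247400539"; // path from the log
    final CountDownLatch deleted = new CountDownLatch(1);

    // Quorum address copied from the log above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:49967", 30_000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // React to NodeDeleted on the ephemeral RS znode, the event seen in the log.
        if (event.getType() == Event.EventType.NodeDeleted && rsZnode.equals(event.getPath())) {
          deleted.countDown();
        }
      }
    });

    // Register the watch; exists() also works for nodes that do not exist yet.
    zk.exists(rsZnode, true);
    deleted.await();
    System.out.println("RegionServer znode deleted; a tracker would now process expiration");
    zk.close();
  }
}
```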
2024-11-22T03:50:48,601 DEBUG [M:0;c85114ed5096:38717 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T03:50:48,601 DEBUG [master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247400681 {}] cleaner.HFileCleaner(306): Exit Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247400681,5,FailOnTimeoutGroup] 2024-11-22T03:50:48,601 DEBUG [master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247400682 {}] cleaner.HFileCleaner(306): Exit Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247400682,5,FailOnTimeoutGroup] 2024-11-22T03:50:48,601 INFO [M:0;c85114ed5096:38717 {}] hbase.ChoreService(370): Chore service for: master/c85114ed5096:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T03:50:48,601 INFO [M:0;c85114ed5096:38717 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:50:48,601 DEBUG [M:0;c85114ed5096:38717 {}] master.HMaster(1795): Stopping service threads 2024-11-22T03:50:48,601 INFO [M:0;c85114ed5096:38717 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T03:50:48,601 INFO [M:0;c85114ed5096:38717 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:50:48,601 ERROR [M:0;c85114ed5096:38717 {}] procedure2.ProcedureExecutor(763): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:41565,5,PEWorkerGroup] 2024-11-22T03:50:48,601 INFO [M:0;c85114ed5096:38717 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T03:50:48,601 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
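The cleaner and flusher chores above (LogCleaner, HFileCleaner, FlushedSequenceIdFlusher) are periodic background tasks that get cancelled and interrupted during shutdown, which is why several threads log an interrupt and exit. As a rough, JDK-only analogy (not HBase's ChoreService API), the sketch below schedules one periodic task and then shuts it down the same way: cancel pending runs, interrupt the in-flight one, and wait briefly for the worker to exit.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreShutdownSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();

    // A stand-in for a cleaner chore: runs every 60 seconds until shutdown.
    chores.scheduleAtFixedRate(() -> {
      try {
        System.out.println("cleaning old WALs...");
        Thread.sleep(1_000); // simulate work that can be interrupted mid-flight
      } catch (InterruptedException e) {
        // Mirrors the log: the chore notices the interrupt and exits quietly.
        System.out.println("Interrupted while cleaning old WALs, exiting.");
        Thread.currentThread().interrupt();
      }
    }, 0, 60, TimeUnit.SECONDS);

    Thread.sleep(200);                             // let the first run start
    chores.shutdownNow();                          // cancel pending runs, interrupt the current one
    chores.awaitTermination(5, TimeUnit.SECONDS);  // give the worker a moment to exit
  }
}
```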
2024-11-22T03:50:48,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T03:50:48,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:48,602 DEBUG [M:0;c85114ed5096:38717 {}] zookeeper.ZKUtil(347): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T03:50:48,602 WARN [M:0;c85114ed5096:38717 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T03:50:48,603 INFO [M:0;c85114ed5096:38717 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/.lastflushedseqids 2024-11-22T03:50:48,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741884_1060 (size=228) 2024-11-22T03:50:48,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741884_1060 (size=228) 2024-11-22T03:50:48,609 INFO [M:0;c85114ed5096:38717 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T03:50:48,609 INFO [M:0;c85114ed5096:38717 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T03:50:48,610 DEBUG [M:0;c85114ed5096:38717 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:50:48,610 INFO [M:0;c85114ed5096:38717 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:50:48,610 DEBUG [M:0;c85114ed5096:38717 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:50:48,610 DEBUG [M:0;c85114ed5096:38717 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:50:48,610 DEBUG [M:0;c85114ed5096:38717 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:50:48,610 INFO [M:0;c85114ed5096:38717 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.36 KB 2024-11-22T03:50:48,632 DEBUG [M:0;c85114ed5096:38717 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/904cc3855dc543bca0801d91097cc135 is 82, key is hbase:meta,,1/info:regioninfo/1732247401314/Put/seqid=0 2024-11-22T03:50:48,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741885_1061 (size=5672) 2024-11-22T03:50:48,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741885_1061 (size=5672) 2024-11-22T03:50:48,637 INFO [M:0;c85114ed5096:38717 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/904cc3855dc543bca0801d91097cc135 2024-11-22T03:50:48,656 DEBUG [M:0;c85114ed5096:38717 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4fe1b928b7ad4c648e7d5d8c4a148f39 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732247401754/Put/seqid=0 2024-11-22T03:50:48,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741886_1062 (size=7089) 2024-11-22T03:50:48,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741886_1062 (size=7089) 2024-11-22T03:50:48,661 INFO [M:0;c85114ed5096:38717 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4fe1b928b7ad4c648e7d5d8c4a148f39 2024-11-22T03:50:48,665 INFO [M:0;c85114ed5096:38717 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4fe1b928b7ad4c648e7d5d8c4a148f39 2024-11-22T03:50:48,680 DEBUG [M:0;c85114ed5096:38717 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9948bed01b524c14afeb276ed577a97e is 69, key is c85114ed5096,34883,1732247400539/rs:state/1732247400769/Put/seqid=0 2024-11-22T03:50:48,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741887_1063 (size=5156) 2024-11-22T03:50:48,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741887_1063 (size=5156) 2024-11-22T03:50:48,685 INFO [M:0;c85114ed5096:38717 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), 
to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9948bed01b524c14afeb276ed577a97e 2024-11-22T03:50:48,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:50:48,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34883-0x100658d0c8f0001, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:50:48,700 INFO [RS:0;c85114ed5096:34883 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:50:48,700 INFO [RS:0;c85114ed5096:34883 {}] regionserver.HRegionServer(1031): Exiting; stopping=c85114ed5096,34883,1732247400539; zookeeper connection closed. 2024-11-22T03:50:48,700 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@14d8fa9f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@14d8fa9f 2024-11-22T03:50:48,700 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T03:50:48,704 DEBUG [M:0;c85114ed5096:38717 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/22af53bc8a384e4b9d8a7c0740328a6f is 52, key is load_balancer_on/state:d/1732247401363/Put/seqid=0 2024-11-22T03:50:48,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741888_1064 (size=5056) 2024-11-22T03:50:48,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741888_1064 (size=5056) 2024-11-22T03:50:48,709 INFO [M:0;c85114ed5096:38717 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/22af53bc8a384e4b9d8a7c0740328a6f 2024-11-22T03:50:48,714 DEBUG [M:0;c85114ed5096:38717 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/904cc3855dc543bca0801d91097cc135 as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/904cc3855dc543bca0801d91097cc135 2024-11-22T03:50:48,719 INFO [M:0;c85114ed5096:38717 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/904cc3855dc543bca0801d91097cc135, entries=8, sequenceid=125, filesize=5.5 K 2024-11-22T03:50:48,720 DEBUG [M:0;c85114ed5096:38717 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4fe1b928b7ad4c648e7d5d8c4a148f39 as 
hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4fe1b928b7ad4c648e7d5d8c4a148f39 2024-11-22T03:50:48,728 INFO [M:0;c85114ed5096:38717 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4fe1b928b7ad4c648e7d5d8c4a148f39 2024-11-22T03:50:48,728 INFO [M:0;c85114ed5096:38717 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4fe1b928b7ad4c648e7d5d8c4a148f39, entries=13, sequenceid=125, filesize=6.9 K 2024-11-22T03:50:48,729 DEBUG [M:0;c85114ed5096:38717 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9948bed01b524c14afeb276ed577a97e as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9948bed01b524c14afeb276ed577a97e 2024-11-22T03:50:48,746 INFO [M:0;c85114ed5096:38717 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9948bed01b524c14afeb276ed577a97e, entries=1, sequenceid=125, filesize=5.0 K 2024-11-22T03:50:48,748 DEBUG [M:0;c85114ed5096:38717 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/22af53bc8a384e4b9d8a7c0740328a6f as hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/22af53bc8a384e4b9d8a7c0740328a6f 2024-11-22T03:50:48,749 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:50:48,749 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T03:50:48,750 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-22T03:50:48,756 INFO [M:0;c85114ed5096:38717 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41565/user/jenkins/test-data/09a39f08-f296-c348-7d6a-57d06eb02a2a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/22af53bc8a384e4b9d8a7c0740328a6f, entries=1, sequenceid=125, filesize=4.9 K 2024-11-22T03:50:48,757 INFO [M:0;c85114ed5096:38717 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=125, compaction requested=false 2024-11-22T03:50:48,761 INFO [M:0;c85114ed5096:38717 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
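The flush above first writes each column family's data to a file under the store's .tmp/ directory and only then commits it by moving it to its final location, so a crash mid-flush never leaves a half-written file visible in the store. The sketch below shows that write-to-temp-then-rename pattern with the generic Hadoop FileSystem API; the paths and payload are placeholders, and this is not the HRegionFileSystem code that produced these log lines.

```java
import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41565"), new Configuration());

    // Hypothetical store layout: data is staged under .tmp/ first, then published.
    Path tmp = new Path("/example/store/.tmp/flushed-file");
    Path committed = new Path("/example/store/info/flushed-file");

    // 1. Write the whole file to the temporary location and close it.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("flushed cells would go here".getBytes(StandardCharsets.UTF_8));
    }

    // 2. Commit by renaming into place; readers only ever see complete files.
    fs.mkdirs(committed.getParent());
    if (!fs.rename(tmp, committed)) {
      throw new java.io.IOException("Commit failed for " + tmp);
    }
  }
}
```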
2024-11-22T03:50:48,761 DEBUG [M:0;c85114ed5096:38717 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732247448610Disabling compacts and flushes for region at 1732247448610Disabling writes for close at 1732247448610Obtaining lock to block concurrent updates at 1732247448610Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732247448610Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1732247448611 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732247448611Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732247448611Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732247448631 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732247448631Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732247448641 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732247448656 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732247448656Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732247448665 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732247448679 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732247448680 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732247448690 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732247448704 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732247448704Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56f1ae63: reopening flushed file at 1732247448714 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7af2a4: reopening flushed file at 1732247448719 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e7f6f0f: reopening flushed file at 1732247448728 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1bffb358: reopening flushed file at 1732247448746 (+18 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=125, compaction requested=false at 1732247448757 (+11 ms)Writing region close event to WAL at 1732247448761 (+4 ms)Closed at 1732247448761 2024-11-22T03:50:48,761 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:48,761 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:48,761 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:48,761 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:48,762 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:48,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43633 is added to blk_1073741830_1006 (size=61320) 2024-11-22T03:50:48,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41503 is added to blk_1073741830_1006 (size=61320) 2024-11-22T03:50:48,764 INFO [M:0;c85114ed5096:38717 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
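The region close journal above reports each step with a raw epoch-millisecond value (for example "Waiting for close lock at 1732247448610") plus the delta to the previous step. A quick way to cross-check those values against the human-readable log timestamps is to convert them with java.time; the sketch below does that for two values taken from the journal, purely as a reading aid.

```java
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;

public class JournalTimestampSketch {
  public static void main(String[] args) {
    // Same layout as the log's own timestamps, e.g. 2024-11-22T03:50:48,610.
    DateTimeFormatter fmt = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss,SSS");

    long[] journalMillis = {1732247448610L, 1732247448757L}; // values from the journal above
    for (long millis : journalMillis) {
      // The surrounding log lines line up with UTC, so render these in UTC as well.
      String readable = Instant.ofEpochMilli(millis).atOffset(ZoneOffset.UTC).format(fmt);
      System.out.println(millis + " -> " + readable);
    }
  }
}
```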
2024-11-22T03:50:48,764 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T03:50:48,764 INFO [M:0;c85114ed5096:38717 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38717 2024-11-22T03:50:48,764 INFO [M:0;c85114ed5096:38717 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:50:48,786 INFO [regionserver/c85114ed5096:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:50:48,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:50:48,866 INFO [M:0;c85114ed5096:38717 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:50:48,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38717-0x100658d0c8f0000, quorum=127.0.0.1:49967, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:50:48,870 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5f41f884{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:50:48,870 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d701b2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:50:48,870 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:50:48,870 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@165b13fe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:50:48,870 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7326ff1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/hadoop.log.dir/,STOPPED} 2024-11-22T03:50:48,872 WARN [BP-1107785697-172.17.0.2-1732247399929 heartbeating to localhost/127.0.0.1:41565 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:50:48,872 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:50:48,872 WARN [BP-1107785697-172.17.0.2-1732247399929 heartbeating to localhost/127.0.0.1:41565 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1107785697-172.17.0.2-1732247399929 (Datanode Uuid 2851d919-f3e6-42f8-9a11-186e42430dbe) service to localhost/127.0.0.1:41565 2024-11-22T03:50:48,872 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:50:48,873 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/cluster_7ba9fba6-8b91-7904-6f67-0deb24c1911c/data/data3/current/BP-1107785697-172.17.0.2-1732247399929 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:50:48,873 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/cluster_7ba9fba6-8b91-7904-6f67-0deb24c1911c/data/data4/current/BP-1107785697-172.17.0.2-1732247399929 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:50:48,873 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:50:48,878 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d953028{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:50:48,878 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@424c4a45{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:50:48,878 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:50:48,878 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77b55185{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:50:48,878 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@107c9d2b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/hadoop.log.dir/,STOPPED} 2024-11-22T03:50:48,880 WARN [BP-1107785697-172.17.0.2-1732247399929 heartbeating to localhost/127.0.0.1:41565 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:50:48,880 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
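The two WARN stack traces that follow ("Failed invocation for hdfs://...": InvocationTargetException caused by "Filesystem closed") come from the WAL close path probing, via reflection, whether its files are already closed after the shared DFS client has been shut down. As background only, the public HDFS calls underneath that probe look roughly like the sketch below; the configuration and WAL path variables are hypothetical, and this is not the RecoverLeaseFSUtils implementation itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseProbeSketch {
  static boolean recoverWalLease(Configuration conf, Path walPath) throws Exception {
    FileSystem fs = walPath.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true;                                    // nothing to recover on a local filesystem
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    if (dfs.isFileClosed(walPath)) {                  // the call that fails in the traces below once the client is closed
      return true;
    }
    return dfs.recoverLease(walPath);                 // ask the NameNode to begin lease recovery
  }
}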
2024-11-22T03:50:48,880 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:50:48,880 WARN [BP-1107785697-172.17.0.2-1732247399929 heartbeating to localhost/127.0.0.1:41565 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1107785697-172.17.0.2-1732247399929 (Datanode Uuid 864e3c50-7fea-4e1d-81f8-9b165c94078c) service to localhost/127.0.0.1:41565 2024-11-22T03:50:48,880 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/cluster_7ba9fba6-8b91-7904-6f67-0deb24c1911c/data/data1/current/BP-1107785697-172.17.0.2-1732247399929 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:50:48,881 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/cluster_7ba9fba6-8b91-7904-6f67-0deb24c1911c/data/data2/current/BP-1107785697-172.17.0.2-1732247399929 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:50:48,881 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:50:48,886 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@48f98d26{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:50:48,886 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@61c0fae3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:50:48,886 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:50:48,886 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75b38c73{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:50:48,887 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b4e4996{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/hadoop.log.dir/,STOPPED} 2024-11-22T03:50:48,894 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T03:50:48,924 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T03:50:48,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:48,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:48,932 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 205) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41565 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:41565 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:41565 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41565 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41565 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41565 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41565 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:41565 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:41565 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 485) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=222 (was 31) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2748 (was 2576) - AvailableMemoryMB LEAK? - 2024-11-22T03:50:48,940 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=222, ProcessCount=11, AvailableMemoryMB=2749 2024-11-22T03:50:48,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T03:50:48,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/hadoop.log.dir so I do NOT create it in target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294 2024-11-22T03:50:48,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8c51203d-da85-47bd-4936-d2aaca15b635/hadoop.tmp.dir so I do NOT create it in target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294 2024-11-22T03:50:48,940 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/cluster_72a55cff-9cdc-b546-084c-7b1953f83103, deleteOnExit=true 2024-11-22T03:50:48,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T03:50:48,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/test.cache.data in system properties and HBase conf 2024-11-22T03:50:48,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T03:50:48,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/hadoop.log.dir in system properties and HBase conf 2024-11-22T03:50:48,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T03:50:48,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T03:50:48,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T03:50:48,941 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-22T03:50:48,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:50:48,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T03:50:48,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T03:50:48,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:50:48,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T03:50:48,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T03:50:48,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T03:50:48,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:50:48,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T03:50:48,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/nfs.dump.dir in system properties and HBase conf 2024-11-22T03:50:48,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/java.io.tmpdir in system properties and HBase conf 2024-11-22T03:50:48,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T03:50:48,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T03:50:48,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T03:50:48,956 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:50:48,995 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:50:48,998 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:50:48,999 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:50:48,999 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:50:48,999 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:50:49,001 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:50:49,002 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7da51f5c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:50:49,002 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ea4b83c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:50:49,095 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@50f1fb65{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/java.io.tmpdir/jetty-localhost-39137-hadoop-hdfs-3_4_1-tests_jar-_-any-2824907742104995659/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:50:49,095 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a8e8c4d{HTTP/1.1, (http/1.1)}{localhost:39137} 2024-11-22T03:50:49,095 INFO [Time-limited test {}] server.Server(415): Started @286422ms 2024-11-22T03:50:49,106 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T03:50:49,145 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:50:49,147 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:50:49,147 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:50:49,147 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:50:49,147 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:50:49,148 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@351231b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:50:49,148 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e0377f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:50:49,240 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e32dd62{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/java.io.tmpdir/jetty-localhost-40107-hadoop-hdfs-3_4_1-tests_jar-_-any-1702251966531000436/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:50:49,241 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b506c4a{HTTP/1.1, (http/1.1)}{localhost:40107} 2024-11-22T03:50:49,241 INFO [Time-limited test {}] server.Server(415): Started @286567ms 2024-11-22T03:50:49,241 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T03:50:49,268 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T03:50:49,270 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T03:50:49,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T03:50:49,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T03:50:49,273 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T03:50:49,274 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7546f00d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/hadoop.log.dir/,AVAILABLE} 2024-11-22T03:50:49,274 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6abd1419{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T03:50:49,302 WARN [Thread-2499 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/cluster_72a55cff-9cdc-b546-084c-7b1953f83103/data/data1/current/BP-249914748-172.17.0.2-1732247448959/current, will proceed with Du for space computation calculation, 2024-11-22T03:50:49,305 WARN [Thread-2500 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/cluster_72a55cff-9cdc-b546-084c-7b1953f83103/data/data2/current/BP-249914748-172.17.0.2-1732247448959/current, will proceed with Du for space computation calculation, 2024-11-22T03:50:49,322 WARN [Thread-2478 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T03:50:49,324 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5f36e1c96bbb203c with lease ID 0x268fb38ed7de840e: Processing first storage report for DS-159f85fe-b91a-4f50-835e-3c582c9d1fcd from datanode DatanodeRegistration(127.0.0.1:38109, datanodeUuid=5f403b81-ab7f-4f1f-b2d8-6572218da7d6, infoPort=44793, infoSecurePort=0, ipcPort=44345, storageInfo=lv=-57;cid=testClusterID;nsid=2051332067;c=1732247448959) 2024-11-22T03:50:49,324 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5f36e1c96bbb203c with lease ID 0x268fb38ed7de840e: from storage DS-159f85fe-b91a-4f50-835e-3c582c9d1fcd node DatanodeRegistration(127.0.0.1:38109, datanodeUuid=5f403b81-ab7f-4f1f-b2d8-6572218da7d6, infoPort=44793, infoSecurePort=0, ipcPort=44345, storageInfo=lv=-57;cid=testClusterID;nsid=2051332067;c=1732247448959), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:50:49,324 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5f36e1c96bbb203c with lease ID 0x268fb38ed7de840e: Processing first storage report for DS-26a9db75-1895-410c-8375-b7359e00d553 from datanode DatanodeRegistration(127.0.0.1:38109, datanodeUuid=5f403b81-ab7f-4f1f-b2d8-6572218da7d6, infoPort=44793, infoSecurePort=0, ipcPort=44345, storageInfo=lv=-57;cid=testClusterID;nsid=2051332067;c=1732247448959) 2024-11-22T03:50:49,324 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5f36e1c96bbb203c with lease ID 0x268fb38ed7de840e: from storage DS-26a9db75-1895-410c-8375-b7359e00d553 node DatanodeRegistration(127.0.0.1:38109, datanodeUuid=5f403b81-ab7f-4f1f-b2d8-6572218da7d6, infoPort=44793, infoSecurePort=0, ipcPort=44345, storageInfo=lv=-57;cid=testClusterID;nsid=2051332067;c=1732247448959), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:50:49,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a047db8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/java.io.tmpdir/jetty-localhost-45779-hadoop-hdfs-3_4_1-tests_jar-_-any-10064452620028770/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:50:49,375 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51c3fa60{HTTP/1.1, (http/1.1)}{localhost:45779} 2024-11-22T03:50:49,375 INFO [Time-limited test {}] server.Server(415): Started @286701ms 2024-11-22T03:50:49,376 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
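The startup sequence above (mini DFS namenode and two datanodes with their jetty web contexts, followed by the first block reports) is driven by the StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...} logged at the start of testLogRollOnNothingWritten. A hedged sketch of building that option and starting the cluster follows; the builder method names mirror the fields printed in the log and should be treated as assumptions rather than verified signatures.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class StartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // one HMaster
        .numRegionServers(1)  // one regionserver
        .numDataNodes(2)      // two HDFS datanodes, as in the block reports above
        .numZkServers(1)      // one mini ZooKeeper server
        .build();
    // Starts mini ZooKeeper, mini DFS, then the HBase master and regionserver.
    util.startMiniCluster(option);
    util.shutdownMiniCluster();
  }
}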
2024-11-22T03:50:49,448 WARN [Thread-2525 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/cluster_72a55cff-9cdc-b546-084c-7b1953f83103/data/data3/current/BP-249914748-172.17.0.2-1732247448959/current, will proceed with Du for space computation calculation, 2024-11-22T03:50:49,449 WARN [Thread-2526 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/cluster_72a55cff-9cdc-b546-084c-7b1953f83103/data/data4/current/BP-249914748-172.17.0.2-1732247448959/current, will proceed with Du for space computation calculation, 2024-11-22T03:50:49,466 WARN [Thread-2514 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T03:50:49,468 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59e23f9ab0757a81 with lease ID 0x268fb38ed7de840f: Processing first storage report for DS-9430c512-dce2-446e-a7c6-f65226a711df from datanode DatanodeRegistration(127.0.0.1:39779, datanodeUuid=d2363938-d921-435c-b7aa-14d012a1afeb, infoPort=38141, infoSecurePort=0, ipcPort=36159, storageInfo=lv=-57;cid=testClusterID;nsid=2051332067;c=1732247448959) 2024-11-22T03:50:49,468 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59e23f9ab0757a81 with lease ID 0x268fb38ed7de840f: from storage DS-9430c512-dce2-446e-a7c6-f65226a711df node DatanodeRegistration(127.0.0.1:39779, datanodeUuid=d2363938-d921-435c-b7aa-14d012a1afeb, infoPort=38141, infoSecurePort=0, ipcPort=36159, storageInfo=lv=-57;cid=testClusterID;nsid=2051332067;c=1732247448959), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:50:49,468 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59e23f9ab0757a81 with lease ID 0x268fb38ed7de840f: Processing first storage report for DS-883b2197-30d7-4763-9de4-348136e020cf from datanode DatanodeRegistration(127.0.0.1:39779, datanodeUuid=d2363938-d921-435c-b7aa-14d012a1afeb, infoPort=38141, infoSecurePort=0, ipcPort=36159, storageInfo=lv=-57;cid=testClusterID;nsid=2051332067;c=1732247448959) 2024-11-22T03:50:49,468 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59e23f9ab0757a81 with lease ID 0x268fb38ed7de840f: from storage DS-883b2197-30d7-4763-9de4-348136e020cf node DatanodeRegistration(127.0.0.1:39779, datanodeUuid=d2363938-d921-435c-b7aa-14d012a1afeb, infoPort=38141, infoSecurePort=0, ipcPort=36159, storageInfo=lv=-57;cid=testClusterID;nsid=2051332067;c=1732247448959), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T03:50:49,502 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294 2024-11-22T03:50:49,505 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/cluster_72a55cff-9cdc-b546-084c-7b1953f83103/zookeeper_0, clientPort=52875, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/cluster_72a55cff-9cdc-b546-084c-7b1953f83103/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/cluster_72a55cff-9cdc-b546-084c-7b1953f83103/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T03:50:49,506 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52875 2024-11-22T03:50:49,506 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:49,507 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:49,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:50:49,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741825_1001 (size=7) 2024-11-22T03:50:49,517 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3 with version=8 2024-11-22T03:50:49,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37663/user/jenkins/test-data/3a4dbebb-329d-eede-14c0-48b5f4f32984/hbase-staging 2024-11-22T03:50:49,519 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c85114ed5096:0 server-side Connection retries=45 2024-11-22T03:50:49,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:50:49,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:50:49,519 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:50:49,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:50:49,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:50:49,519 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T03:50:49,519 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:50:49,520 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38561 2024-11-22T03:50:49,520 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38561 connecting to ZooKeeper ensemble=127.0.0.1:52875 2024-11-22T03:50:49,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:385610x0, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:50:49,524 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38561-0x100658dcc0f0000 connected 2024-11-22T03:50:49,533 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:49,534 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:49,536 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:50:49,537 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3, hbase.cluster.distributed=false 2024-11-22T03:50:49,538 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:50:49,540 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38561 2024-11-22T03:50:49,540 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38561 2024-11-22T03:50:49,541 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38561 2024-11-22T03:50:49,544 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38561 2024-11-22T03:50:49,544 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38561 2024-11-22T03:50:49,561 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c85114ed5096:0 server-side Connection retries=45 2024-11-22T03:50:49,561 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:50:49,561 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T03:50:49,562 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T03:50:49,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T03:50:49,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T03:50:49,562 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T03:50:49,562 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T03:50:49,563 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46661 2024-11-22T03:50:49,564 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46661 connecting to ZooKeeper ensemble=127.0.0.1:52875 2024-11-22T03:50:49,565 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:49,568 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:49,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:466610x0, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T03:50:49,573 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46661-0x100658dcc0f0001 connected 2024-11-22T03:50:49,573 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:50:49,574 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T03:50:49,577 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T03:50:49,578 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T03:50:49,580 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T03:50:49,581 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46661 2024-11-22T03:50:49,581 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46661 2024-11-22T03:50:49,581 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46661 2024-11-22T03:50:49,586 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46661 2024-11-22T03:50:49,586 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46661 
2024-11-22T03:50:49,597 DEBUG [M:0;c85114ed5096:38561 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c85114ed5096:38561 2024-11-22T03:50:49,598 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c85114ed5096,38561,1732247449518 2024-11-22T03:50:49,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:50:49,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:50:49,599 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c85114ed5096,38561,1732247449518 2024-11-22T03:50:49,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T03:50:49,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:49,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:49,600 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T03:50:49,601 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c85114ed5096,38561,1732247449518 from backup master directory 2024-11-22T03:50:49,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c85114ed5096,38561,1732247449518 2024-11-22T03:50:49,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:50:49,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T03:50:49,602 WARN [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-22T03:50:49,602 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c85114ed5096,38561,1732247449518 2024-11-22T03:50:49,606 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/hbase.id] with ID: a83293fe-32db-4d78-9689-4926fcb72773 2024-11-22T03:50:49,606 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/.tmp/hbase.id 2024-11-22T03:50:49,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:50:49,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741826_1002 (size=42) 2024-11-22T03:50:49,619 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/.tmp/hbase.id]:[hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/hbase.id] 2024-11-22T03:50:49,632 INFO [master/c85114ed5096:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:49,632 INFO [master/c85114ed5096:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T03:50:49,633 INFO [master/c85114ed5096:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-22T03:50:49,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:49,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:49,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:50:49,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741827_1003 (size=196) 2024-11-22T03:50:49,648 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T03:50:49,649 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T03:50:49,649 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:50:49,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:50:49,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741828_1004 (size=1189) 2024-11-22T03:50:49,661 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store 2024-11-22T03:50:49,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:50:49,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741829_1005 (size=34) 2024-11-22T03:50:49,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:49,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T03:50:50,076 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:50:50,076 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:50:50,076 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:50:50,076 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:50:50,076 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:50:50,076 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:50:50,076 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:50:50,076 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732247450076Disabling compacts and flushes for region at 1732247450076Disabling writes for close at 1732247450076Writing region close event to WAL at 1732247450076Closed at 1732247450076 2024-11-22T03:50:50,077 WARN [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/.initializing 2024-11-22T03:50:50,077 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/WALs/c85114ed5096,38561,1732247449518 2024-11-22T03:50:50,080 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C38561%2C1732247449518, suffix=, logDir=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/WALs/c85114ed5096,38561,1732247449518, archiveDir=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/oldWALs, maxLogs=10 2024-11-22T03:50:50,081 INFO [master/c85114ed5096:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C38561%2C1732247449518.1732247450081 2024-11-22T03:50:50,087 INFO [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/WALs/c85114ed5096,38561,1732247449518/c85114ed5096%2C38561%2C1732247449518.1732247450081 2024-11-22T03:50:50,087 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44793:44793),(127.0.0.1/127.0.0.1:38141:38141)] 2024-11-22T03:50:50,093 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:50:50,093 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:50:50,094 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:50,094 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] 
regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:50,097 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:50,099 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T03:50:50,099 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:50,099 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:50,100 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:50,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T03:50:50,101 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:50,102 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:50:50,102 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:50,103 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T03:50:50,103 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:50,104 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:50:50,104 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:50,105 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T03:50:50,105 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:50,106 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T03:50:50,106 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:50,107 DEBUG 
[master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:50,107 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:50,108 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:50,108 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:50,109 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T03:50:50,111 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T03:50:50,113 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:50:50,114 INFO [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=841056, jitterRate=0.06945843994617462}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T03:50:50,114 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732247450094Initializing all the Stores at 1732247450095 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247450095Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247450097 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247450097Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247450097Cleaning up temporary data from old regions at 1732247450108 (+11 ms)Region opened successfully at 1732247450114 (+6 ms) 2024-11-22T03:50:50,116 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T03:50:50,120 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4288280f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c85114ed5096/172.17.0.2:0 2024-11-22T03:50:50,121 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T03:50:50,121 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T03:50:50,121 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T03:50:50,122 INFO [master/c85114ed5096:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T03:50:50,122 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T03:50:50,123 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T03:50:50,123 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T03:50:50,131 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-22T03:50:50,132 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T03:50:50,132 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T03:50:50,133 INFO [master/c85114ed5096:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T03:50:50,133 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T03:50:50,134 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T03:50:50,134 INFO [master/c85114ed5096:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T03:50:50,135 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T03:50:50,136 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T03:50:50,137 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T03:50:50,138 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T03:50:50,139 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T03:50:50,140 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T03:50:50,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:50:50,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T03:50:50,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:50,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-22T03:50:50,142 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c85114ed5096,38561,1732247449518, sessionid=0x100658dcc0f0000, setting cluster-up flag (Was=false) 2024-11-22T03:50:50,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:50,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:50,147 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T03:50:50,148 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c85114ed5096,38561,1732247449518 2024-11-22T03:50:50,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:50,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:50,156 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T03:50:50,156 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c85114ed5096,38561,1732247449518 2024-11-22T03:50:50,162 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T03:50:50,163 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T03:50:50,164 INFO [master/c85114ed5096:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T03:50:50,164 INFO [master/c85114ed5096:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-22T03:50:50,164 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c85114ed5096,38561,1732247449518 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T03:50:50,166 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:50:50,166 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:50:50,166 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:50:50,166 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c85114ed5096:0, corePoolSize=5, maxPoolSize=5 2024-11-22T03:50:50,166 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c85114ed5096:0, corePoolSize=10, maxPoolSize=10 2024-11-22T03:50:50,166 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:50,166 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c85114ed5096:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:50:50,166 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:50,171 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:50:50,171 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T03:50:50,173 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:50,173 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732247480173 2024-11-22T03:50:50,173 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T03:50:50,173 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T03:50:50,173 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T03:50:50,173 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T03:50:50,173 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T03:50:50,173 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T03:50:50,173 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T03:50:50,174 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-22T03:50:50,174 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T03:50:50,174 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T03:50:50,174 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T03:50:50,176 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T03:50:50,177 INFO [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T03:50:50,177 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247450177,5,FailOnTimeoutGroup] 2024-11-22T03:50:50,181 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247450177,5,FailOnTimeoutGroup] 2024-11-22T03:50:50,181 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:50,181 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T03:50:50,181 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:50,181 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-22T03:50:50,191 INFO [RS:0;c85114ed5096:46661 {}] regionserver.HRegionServer(746): ClusterId : a83293fe-32db-4d78-9689-4926fcb72773 2024-11-22T03:50:50,191 DEBUG [RS:0;c85114ed5096:46661 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T03:50:50,193 DEBUG [RS:0;c85114ed5096:46661 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T03:50:50,193 DEBUG [RS:0;c85114ed5096:46661 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T03:50:50,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:50:50,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741831_1007 (size=1321) 2024-11-22T03:50:50,196 DEBUG [RS:0;c85114ed5096:46661 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T03:50:50,196 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T03:50:50,196 DEBUG [RS:0;c85114ed5096:46661 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@364f7f8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c85114ed5096/172.17.0.2:0 2024-11-22T03:50:50,196 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3 2024-11-22T03:50:50,209 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:50:50,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741832_1008 (size=32) 2024-11-22T03:50:50,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:50:50,213 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:50:50,215 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:50:50,215 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:50,215 DEBUG [RS:0;c85114ed5096:46661 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c85114ed5096:46661 2024-11-22T03:50:50,215 INFO [RS:0;c85114ed5096:46661 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T03:50:50,216 INFO [RS:0;c85114ed5096:46661 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T03:50:50,216 DEBUG [RS:0;c85114ed5096:46661 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T03:50:50,216 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:50,216 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:50:50,216 INFO [RS:0;c85114ed5096:46661 {}] regionserver.HRegionServer(2659): reportForDuty to master=c85114ed5096,38561,1732247449518 with port=46661, startcode=1732247449561 2024-11-22T03:50:50,217 DEBUG [RS:0;c85114ed5096:46661 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T03:50:50,218 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:50:50,218 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:50,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:50,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:50:50,220 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:50:50,220 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:50,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:50,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:50:50,224 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:50:50,224 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:50,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:50,225 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:50:50,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/data/hbase/meta/1588230740 2024-11-22T03:50:50,226 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48125, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T03:50:50,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/data/hbase/meta/1588230740 2024-11-22T03:50:50,227 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38561 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c85114ed5096,46661,1732247449561 2024-11-22T03:50:50,227 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38561 {}] master.ServerManager(517): Registering regionserver=c85114ed5096,46661,1732247449561 2024-11-22T03:50:50,229 DEBUG [RS:0;c85114ed5096:46661 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3 2024-11-22T03:50:50,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:50:50,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:50:50,229 DEBUG [RS:0;c85114ed5096:46661 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35659 2024-11-22T03:50:50,229 DEBUG [RS:0;c85114ed5096:46661 {}] 
regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T03:50:50,230 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:50:50,231 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:50:50,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:50:50,232 DEBUG [RS:0;c85114ed5096:46661 {}] zookeeper.ZKUtil(111): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c85114ed5096,46661,1732247449561 2024-11-22T03:50:50,232 WARN [RS:0;c85114ed5096:46661 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T03:50:50,232 INFO [RS:0;c85114ed5096:46661 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:50:50,232 DEBUG [RS:0;c85114ed5096:46661 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/WALs/c85114ed5096,46661,1732247449561 2024-11-22T03:50:50,234 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c85114ed5096,46661,1732247449561] 2024-11-22T03:50:50,238 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T03:50:50,238 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=841660, jitterRate=0.0702265202999115}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:50:50,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732247450211Initializing all the Stores at 1732247450212 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247450212Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247450213 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247450213Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247450213Cleaning up temporary data from old regions at 1732247450229 (+16 ms)Region opened successfully at 1732247450239 (+10 ms) 2024-11-22T03:50:50,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:50:50,239 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:50:50,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:50:50,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:50:50,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:50:50,240 INFO [RS:0;c85114ed5096:46661 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T03:50:50,240 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:50:50,240 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732247450239Disabling compacts and flushes for region at 1732247450239Disabling writes for close at 1732247450239Writing region close event to WAL at 1732247450240 (+1 ms)Closed at 1732247450240 2024-11-22T03:50:50,241 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:50:50,241 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T03:50:50,241 INFO [RS:0;c85114ed5096:46661 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T03:50:50,241 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T03:50:50,243 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:50:50,244 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T03:50:50,245 INFO [RS:0;c85114ed5096:46661 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T03:50:50,245 INFO [RS:0;c85114ed5096:46661 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T03:50:50,250 INFO [RS:0;c85114ed5096:46661 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T03:50:50,251 INFO [RS:0;c85114ed5096:46661 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T03:50:50,251 INFO [RS:0;c85114ed5096:46661 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:50,252 DEBUG [RS:0;c85114ed5096:46661 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:50,252 DEBUG [RS:0;c85114ed5096:46661 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:50,252 DEBUG [RS:0;c85114ed5096:46661 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:50,252 DEBUG [RS:0;c85114ed5096:46661 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:50,252 DEBUG [RS:0;c85114ed5096:46661 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:50,252 DEBUG [RS:0;c85114ed5096:46661 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c85114ed5096:0, corePoolSize=2, maxPoolSize=2 2024-11-22T03:50:50,252 DEBUG [RS:0;c85114ed5096:46661 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:50,252 DEBUG [RS:0;c85114ed5096:46661 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:50,252 DEBUG [RS:0;c85114ed5096:46661 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:50,252 DEBUG [RS:0;c85114ed5096:46661 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:50,252 DEBUG [RS:0;c85114ed5096:46661 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:50,252 DEBUG [RS:0;c85114ed5096:46661 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c85114ed5096:0, corePoolSize=1, maxPoolSize=1 2024-11-22T03:50:50,252 DEBUG [RS:0;c85114ed5096:46661 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:50:50,252 DEBUG [RS:0;c85114ed5096:46661 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c85114ed5096:0, corePoolSize=3, maxPoolSize=3 2024-11-22T03:50:50,253 INFO [RS:0;c85114ed5096:46661 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
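The RS_* executor lines above all follow the same pattern: one bounded pool per operation type with corePoolSize equal to maxPoolSize. A minimal plain-JDK sketch of that pool shape (this is not HBase's own executor.ExecutorService wrapper, just an illustration of the sizing):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedPoolSketch {
  public static void main(String[] args) throws InterruptedException {
    // corePoolSize == maxPoolSize, as for RS_OPEN_REGION above (1/1).
    ThreadPoolExecutor openRegionPool =
        new ThreadPoolExecutor(1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    openRegionPool.submit(() -> System.out.println("handling an open-region event"));
    openRegionPool.shutdown();
    openRegionPool.awaitTermination(5, TimeUnit.SECONDS);
  }
}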
2024-11-22T03:50:50,253 INFO [RS:0;c85114ed5096:46661 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:50,253 INFO [RS:0;c85114ed5096:46661 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:50,253 INFO [RS:0;c85114ed5096:46661 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:50,253 INFO [RS:0;c85114ed5096:46661 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:50,253 INFO [RS:0;c85114ed5096:46661 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,46661,1732247449561-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:50:50,275 INFO [RS:0;c85114ed5096:46661 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T03:50:50,275 INFO [RS:0;c85114ed5096:46661 {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,46661,1732247449561-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:50,275 INFO [RS:0;c85114ed5096:46661 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:50,276 INFO [RS:0;c85114ed5096:46661 {}] regionserver.Replication(171): c85114ed5096,46661,1732247449561 started 2024-11-22T03:50:50,297 INFO [RS:0;c85114ed5096:46661 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:50,297 INFO [RS:0;c85114ed5096:46661 {}] regionserver.HRegionServer(1482): Serving as c85114ed5096,46661,1732247449561, RpcServer on c85114ed5096/172.17.0.2:46661, sessionid=0x100658dcc0f0001 2024-11-22T03:50:50,298 DEBUG [RS:0;c85114ed5096:46661 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T03:50:50,298 DEBUG [RS:0;c85114ed5096:46661 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c85114ed5096,46661,1732247449561 2024-11-22T03:50:50,298 DEBUG [RS:0;c85114ed5096:46661 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,46661,1732247449561' 2024-11-22T03:50:50,298 DEBUG [RS:0;c85114ed5096:46661 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T03:50:50,299 DEBUG [RS:0;c85114ed5096:46661 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T03:50:50,299 DEBUG [RS:0;c85114ed5096:46661 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T03:50:50,299 DEBUG [RS:0;c85114ed5096:46661 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T03:50:50,299 DEBUG [RS:0;c85114ed5096:46661 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c85114ed5096,46661,1732247449561 2024-11-22T03:50:50,299 DEBUG [RS:0;c85114ed5096:46661 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c85114ed5096,46661,1732247449561' 2024-11-22T03:50:50,299 DEBUG [RS:0;c85114ed5096:46661 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T03:50:50,300 DEBUG 
[RS:0;c85114ed5096:46661 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T03:50:50,300 DEBUG [RS:0;c85114ed5096:46661 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T03:50:50,300 INFO [RS:0;c85114ed5096:46661 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T03:50:50,300 INFO [RS:0;c85114ed5096:46661 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T03:50:50,394 WARN [c85114ed5096:38561 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T03:50:50,402 INFO [RS:0;c85114ed5096:46661 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C46661%2C1732247449561, suffix=, logDir=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/WALs/c85114ed5096,46661,1732247449561, archiveDir=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/oldWALs, maxLogs=32 2024-11-22T03:50:50,403 INFO [RS:0;c85114ed5096:46661 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C46661%2C1732247449561.1732247450402 2024-11-22T03:50:50,409 INFO [RS:0;c85114ed5096:46661 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/WALs/c85114ed5096,46661,1732247449561/c85114ed5096%2C46661%2C1732247449561.1732247450402 2024-11-22T03:50:50,413 DEBUG [RS:0;c85114ed5096:46661 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44793:44793),(127.0.0.1/127.0.0.1:38141:38141)] 2024-11-22T03:50:50,644 DEBUG [c85114ed5096:38561 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T03:50:50,645 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c85114ed5096,46661,1732247449561 2024-11-22T03:50:50,646 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c85114ed5096,46661,1732247449561, state=OPENING 2024-11-22T03:50:50,647 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T03:50:50,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:50,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:50,648 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T03:50:50,648 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:50:50,648 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=c85114ed5096,46661,1732247449561}] 2024-11-22T03:50:50,649 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:50:50,802 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T03:50:50,804 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36415, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T03:50:50,807 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T03:50:50,807 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:50:50,808 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c85114ed5096%2C46661%2C1732247449561.meta, suffix=.meta, logDir=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/WALs/c85114ed5096,46661,1732247449561, archiveDir=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/oldWALs, maxLogs=32 2024-11-22T03:50:50,809 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c85114ed5096%2C46661%2C1732247449561.meta.1732247450809.meta 2024-11-22T03:50:50,823 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/WALs/c85114ed5096,46661,1732247449561/c85114ed5096%2C46661%2C1732247449561.meta.1732247450809.meta 2024-11-22T03:50:50,828 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44793:44793),(127.0.0.1/127.0.0.1:38141:38141)] 2024-11-22T03:50:50,833 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T03:50:50,834 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T03:50:50,834 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T03:50:50,834 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-22T03:50:50,834 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T03:50:50,834 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T03:50:50,834 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T03:50:50,834 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T03:50:50,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T03:50:50,839 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T03:50:50,839 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:50,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:50,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T03:50:50,840 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T03:50:50,840 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:50,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:50,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T03:50:50,842 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T03:50:50,842 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:50,842 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T03:50:50,842 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T03:50:50,843 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T03:50:50,843 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T03:50:50,843 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
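The four hbase:meta families opened above (info, ns, rep_barrier, table) carry the attributes echoed in the region open journal: ROW_INDEX_V1 encoding, ROWCOL bloom filters, in-memory, 8 KB blocks for most families (64 KB for rep_barrier). A hedged sketch of declaring an equivalent family with the standard client builder API; the attribute values are read off the log, while the class name is illustrative:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaFamilySketch {
  public static void main(String[] args) {
    // Mirrors the attributes printed for the 'info' family of 1588230740.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setMaxVersions(3)
        .build();
    System.out.println(info);
  }
}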
2024-11-22T03:50:50,843 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T03:50:50,844 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/data/hbase/meta/1588230740 2024-11-22T03:50:50,845 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/data/hbase/meta/1588230740 2024-11-22T03:50:50,846 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T03:50:50,846 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T03:50:50,847 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T03:50:50,848 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T03:50:50,849 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=738520, jitterRate=-0.060924232006073}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T03:50:50,849 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T03:50:50,849 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732247450834Writing region info on filesystem at 1732247450834Initializing all the Stores at 1732247450836 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247450836Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247450837 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732247450837Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732247450837Cleaning up temporary data from old regions at 1732247450846 (+9 ms)Running coprocessor post-open hooks at 1732247450849 (+3 ms)Region opened successfully at 1732247450849 2024-11-22T03:50:50,851 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732247450801 2024-11-22T03:50:50,853 DEBUG [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T03:50:50,853 INFO [RS_OPEN_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T03:50:50,854 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c85114ed5096,46661,1732247449561 2024-11-22T03:50:50,855 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c85114ed5096,46661,1732247449561, state=OPEN 2024-11-22T03:50:50,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:50:50,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T03:50:50,857 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c85114ed5096,46661,1732247449561 2024-11-22T03:50:50,857 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:50:50,857 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T03:50:50,860 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T03:50:50,860 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c85114ed5096,46661,1732247449561 in 209 msec 2024-11-22T03:50:50,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T03:50:50,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 619 msec 2024-11-22T03:50:50,863 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T03:50:50,863 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T03:50:50,865 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:50:50,865 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c85114ed5096,46661,1732247449561, seqNum=-1] 2024-11-22T03:50:50,866 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:50:50,867 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48567, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:50:50,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 710 msec 2024-11-22T03:50:50,874 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732247450874, completionTime=-1 2024-11-22T03:50:50,874 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T03:50:50,874 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T03:50:50,876 INFO [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T03:50:50,876 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732247510876 2024-11-22T03:50:50,877 INFO [master/c85114ed5096:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732247570876 2024-11-22T03:50:50,877 INFO [master/c85114ed5096:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T03:50:50,877 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,38561,1732247449518-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:50,877 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,38561,1732247449518-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:50,877 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,38561,1732247449518-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:50,877 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c85114ed5096:38561, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T03:50:50,877 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:50,879 DEBUG [master/c85114ed5096:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T03:50:50,881 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T03:50:50,886 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.284sec 2024-11-22T03:50:50,887 INFO [master/c85114ed5096:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T03:50:50,887 INFO [master/c85114ed5096:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T03:50:50,887 INFO [master/c85114ed5096:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T03:50:50,887 INFO [master/c85114ed5096:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T03:50:50,887 INFO [master/c85114ed5096:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T03:50:50,887 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,38561,1732247449518-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T03:50:50,887 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,38561,1732247449518-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T03:50:50,889 DEBUG [master/c85114ed5096:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T03:50:50,890 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T03:50:50,890 INFO [master/c85114ed5096:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c85114ed5096,38561,1732247449518-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T03:50:50,891 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fbadfdb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:50:50,891 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c85114ed5096,38561,-1 for getting cluster id 2024-11-22T03:50:50,892 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T03:50:50,893 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a83293fe-32db-4d78-9689-4926fcb72773' 2024-11-22T03:50:50,894 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T03:50:50,894 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a83293fe-32db-4d78-9689-4926fcb72773" 2024-11-22T03:50:50,894 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27a3e655, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:50:50,894 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c85114ed5096,38561,-1] 2024-11-22T03:50:50,894 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T03:50:50,895 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:50:50,896 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59382, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T03:50:50,897 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bf154bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T03:50:50,897 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T03:50:50,898 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c85114ed5096,46661,1732247449561, seqNum=-1] 2024-11-22T03:50:50,898 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T03:50:50,899 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60730, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T03:50:50,901 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c85114ed5096,38561,1732247449518 2024-11-22T03:50:50,901 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T03:50:50,905 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T03:50:50,905 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T03:50:50,907 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/WALs/test.com,8080,1, archiveDir=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/oldWALs, maxLogs=32 2024-11-22T03:50:50,907 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732247450907 2024-11-22T03:50:50,915 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732247450907 2024-11-22T03:50:50,921 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38141:38141),(127.0.0.1/127.0.0.1:44793:44793)] 2024-11-22T03:50:50,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,43241,1732247268913/c85114ed5096%2C43241%2C1732247268913.1732247269140 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:50:50,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43749/user/jenkins/test-data/e4fea127-4ee3-38d4-4454-aa174eb3a360/WALs/c85114ed5096,35197,1732247267957/c85114ed5096%2C35197%2C1732247267957.meta.1732247268744.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T03:50:50,928 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732247450928 2024-11-22T03:50:50,942 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:50,942 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:50,942 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:50,942 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:50,942 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:50,942 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732247450907 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732247450928 2024-11-22T03:50:50,947 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38141:38141),(127.0.0.1/127.0.0.1:44793:44793)] 2024-11-22T03:50:50,948 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732247450907 is not closed yet, will try archiving it next time 2024-11-22T03:50:50,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741835_1011 (size=93) 2024-11-22T03:50:50,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741835_1011 (size=93) 2024-11-22T03:50:50,949 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:50,949 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:50,949 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:50,949 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:50,949 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:50,953 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732247450907 to hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/oldWALs/test.com%2C8080%2C1.1732247450907 2024-11-22T03:50:50,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741836_1012 (size=93) 2024-11-22T03:50:50,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741836_1012 (size=93) 2024-11-22T03:50:50,966 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/oldWALs 2024-11-22T03:50:50,966 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732247450928) 2024-11-22T03:50:50,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T03:50:50,967 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T03:50:50,967 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:50:50,967 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:50:50,967 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:50:50,967 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
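The "Call stack:" DEBUG entry above records which code path closed the async connection by dumping the current thread's stack at close time. A minimal JDK-only sketch of producing such a dump is shown below; the helper name is hypothetical and this is not AsyncConnectionImpl's implementation.

import java.util.Arrays;
import java.util.stream.Collectors;

public final class CallStackDump {
  static String currentCallStack() {
    // Skip the first frames (getStackTrace itself and this helper) and join the rest,
    // one "at ..." line per frame, like the debug output above.
    return Arrays.stream(Thread.currentThread().getStackTrace())
        .skip(2)
        .map(frame -> "  at " + frame)
        .collect(Collectors.joining(System.lineSeparator()));
  }

  public static void main(String[] args) {
    System.out.println("Call stack:" + System.lineSeparator() + currentCallStack());
  }
}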
2024-11-22T03:50:50,968 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T03:50:50,968 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=236212020, stopped=false 2024-11-22T03:50:50,968 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c85114ed5096,38561,1732247449518 2024-11-22T03:50:50,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:50:50,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T03:50:50,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:50,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:50,969 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:50:50,969 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T03:50:50,969 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:50:50,969 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:50:50,970 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c85114ed5096,46661,1732247449561' ***** 2024-11-22T03:50:50,970 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T03:50:50,970 INFO [RS:0;c85114ed5096:46661 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T03:50:50,970 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T03:50:50,970 INFO [RS:0;c85114ed5096:46661 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T03:50:50,970 INFO [RS:0;c85114ed5096:46661 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T03:50:50,970 INFO [RS:0;c85114ed5096:46661 {}] regionserver.HRegionServer(959): stopping server c85114ed5096,46661,1732247449561 2024-11-22T03:50:50,970 INFO [RS:0;c85114ed5096:46661 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:50:50,970 INFO [RS:0;c85114ed5096:46661 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c85114ed5096:46661. 
2024-11-22T03:50:50,970 DEBUG [RS:0;c85114ed5096:46661 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T03:50:50,970 DEBUG [RS:0;c85114ed5096:46661 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:50:50,970 INFO [RS:0;c85114ed5096:46661 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T03:50:50,970 INFO [RS:0;c85114ed5096:46661 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T03:50:50,970 INFO [RS:0;c85114ed5096:46661 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
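The CompactSplit messages above ("Waiting for Split Thread to finish...", etc.) describe the usual drain-then-stop pattern for worker pools during region server shutdown. Below is an illustrative sketch of that pattern with a plain ExecutorService; pool sizes, names, and the timeout are placeholders, not values from the log.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class GracefulShutdown {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService splitPool = Executors.newFixedThreadPool(2);
    splitPool.submit(() -> System.out.println("split task running"));

    splitPool.shutdown();                      // stop accepting new work
    System.out.println("Waiting for Split Thread to finish...");
    if (!splitPool.awaitTermination(30, TimeUnit.SECONDS)) {
      splitPool.shutdownNow();                 // interrupt anything still running
    }
  }
}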
2024-11-22T03:50:50,971 INFO [RS:0;c85114ed5096:46661 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T03:50:50,972 INFO [RS:0;c85114ed5096:46661 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-22T03:50:50,973 DEBUG [RS:0;c85114ed5096:46661 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-22T03:50:50,973 DEBUG [RS:0;c85114ed5096:46661 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-22T03:50:50,973 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T03:50:50,973 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T03:50:50,973 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T03:50:50,973 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T03:50:50,973 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T03:50:50,973 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-22T03:50:50,974 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:50:50,974 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T03:50:50,999 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/data/hbase/meta/1588230740/.tmp/ns/ab9bb778c60e4635974a6a42c7947c24 is 43, key is default/ns:d/1732247450868/Put/seqid=0 2024-11-22T03:50:51,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741837_1013 (size=5153) 2024-11-22T03:50:51,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741837_1013 (size=5153) 2024-11-22T03:50:51,004 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/data/hbase/meta/1588230740/.tmp/ns/ab9bb778c60e4635974a6a42c7947c24 2024-11-22T03:50:51,009 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/data/hbase/meta/1588230740/.tmp/ns/ab9bb778c60e4635974a6a42c7947c24 as 
hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/data/hbase/meta/1588230740/ns/ab9bb778c60e4635974a6a42c7947c24 2024-11-22T03:50:51,014 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/data/hbase/meta/1588230740/ns/ab9bb778c60e4635974a6a42c7947c24, entries=2, sequenceid=6, filesize=5.0 K 2024-11-22T03:50:51,015 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false 2024-11-22T03:50:51,015 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T03:50:51,020 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T03:50:51,020 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T03:50:51,020 INFO [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T03:50:51,020 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732247450973Running coprocessor pre-close hooks at 1732247450973Disabling compacts and flushes for region at 1732247450973Disabling writes for close at 1732247450973Obtaining lock to block concurrent updates at 1732247450973Preparing flush snapshotting stores in 1588230740 at 1732247450973Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732247450976 (+3 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732247450976Flushing 1588230740/ns: creating writer at 1732247450977 (+1 ms)Flushing 1588230740/ns: appending metadata at 1732247450998 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1732247450998Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@797eca17: reopening flushed file at 1732247451009 (+11 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false at 1732247451015 (+6 ms)Writing region close event to WAL at 1732247451016 (+1 ms)Running coprocessor post-close hooks at 1732247451020 (+4 ms)Closed at 1732247451020 2024-11-22T03:50:51,021 DEBUG [RS_CLOSE_META-regionserver/c85114ed5096:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T03:50:51,173 INFO [RS:0;c85114ed5096:46661 {}] regionserver.HRegionServer(976): stopping server c85114ed5096,46661,1732247449561; all regions closed. 
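The "Committing .../.tmp/ns/... as .../ns/..." lines above reflect the flush pattern of writing the new file under a temporary directory and then renaming it into the store directory once it is complete. The sketch below shows that write-to-.tmp-then-rename step with the generic Hadoop FileSystem API; the paths are made up and this is not HBase's HRegionFileSystem code.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommit {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/demo/region/.tmp/ns/flushfile");   // hypothetical paths
    Path dst = new Path("/demo/region/ns/flushfile");

    fs.mkdirs(tmp.getParent());
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("flushed cells".getBytes(StandardCharsets.UTF_8));
    } // the file is fully written and closed before it becomes visible in the store dir

    fs.mkdirs(dst.getParent());
    if (!fs.rename(tmp, dst)) {               // the "Committing ... as ..." step
      throw new java.io.IOException("commit (rename) failed for " + tmp);
    }
  }
}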
2024-11-22T03:50:51,173 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:51,174 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:51,174 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:51,174 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:51,174 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:51,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741834_1010 (size=1152) 2024-11-22T03:50:51,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741834_1010 (size=1152) 2024-11-22T03:50:51,253 INFO [regionserver/c85114ed5096:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T03:50:51,253 INFO [regionserver/c85114ed5096:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T03:50:51,579 DEBUG [RS:0;c85114ed5096:46661 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/oldWALs 2024-11-22T03:50:51,579 INFO [RS:0;c85114ed5096:46661 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c85114ed5096%2C46661%2C1732247449561.meta:.meta(num 1732247450809) 2024-11-22T03:50:51,579 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:51,579 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:51,579 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:51,579 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:51,579 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:51,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741833_1009 (size=93) 2024-11-22T03:50:51,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741833_1009 (size=93) 2024-11-22T03:50:51,583 DEBUG [RS:0;c85114ed5096:46661 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/oldWALs 2024-11-22T03:50:51,583 INFO [RS:0;c85114ed5096:46661 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c85114ed5096%2C46661%2C1732247449561:(num 1732247450402) 2024-11-22T03:50:51,583 DEBUG [RS:0;c85114ed5096:46661 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T03:50:51,583 INFO [RS:0;c85114ed5096:46661 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T03:50:51,583 INFO [RS:0;c85114ed5096:46661 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:50:51,584 INFO [RS:0;c85114ed5096:46661 {}] hbase.ChoreService(370): Chore service for: regionserver/c85114ed5096:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T03:50:51,584 INFO [RS:0;c85114ed5096:46661 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:50:51,584 INFO [regionserver/c85114ed5096:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T03:50:51,584 INFO [RS:0;c85114ed5096:46661 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46661 2024-11-22T03:50:51,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T03:50:51,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c85114ed5096,46661,1732247449561 2024-11-22T03:50:51,585 INFO [RS:0;c85114ed5096:46661 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:50:51,586 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c85114ed5096,46661,1732247449561] 2024-11-22T03:50:51,587 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c85114ed5096,46661,1732247449561 already deleted, retry=false 2024-11-22T03:50:51,587 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c85114ed5096,46661,1732247449561 expired; onlineServers=0 2024-11-22T03:50:51,587 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c85114ed5096,38561,1732247449518' ***** 2024-11-22T03:50:51,587 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T03:50:51,587 INFO [M:0;c85114ed5096:38561 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T03:50:51,587 INFO [M:0;c85114ed5096:38561 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T03:50:51,587 DEBUG [M:0;c85114ed5096:38561 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T03:50:51,587 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
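The NodeDeleted event on /hbase/rs/... and the "RegionServer ephemeral node deleted, processing expiration" line above come from ZooKeeper's ephemeral-node mechanism: the region server registers an EPHEMERAL znode, the master watches it, and when the server's session ends ZooKeeper deletes the node and fires the watch. The sketch below illustrates that mechanism with the plain ZooKeeper client; the connect string and paths are placeholders, and this is not HBase's ZKWatcher code.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralServerNode {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent e) -> {
      if (e.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("server znode deleted, processing expiration: " + e.getPath());
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);

    String path = "/demo/rs/server-1";
    zk.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    zk.exists(path, true);  // leave a watch; NodeDeleted fires when the session goes away

    Thread.sleep(60_000);   // keep the session alive for the demo
    zk.close();             // closing the session removes the ephemeral node
  }
}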
2024-11-22T03:50:51,587 DEBUG [M:0;c85114ed5096:38561 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T03:50:51,587 DEBUG [master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247450177 {}] cleaner.HFileCleaner(306): Exit Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.small.0-1732247450177,5,FailOnTimeoutGroup] 2024-11-22T03:50:51,587 DEBUG [master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247450177 {}] cleaner.HFileCleaner(306): Exit Thread[master/c85114ed5096:0:becomeActiveMaster-HFileCleaner.large.0-1732247450177,5,FailOnTimeoutGroup] 2024-11-22T03:50:51,587 INFO [M:0;c85114ed5096:38561 {}] hbase.ChoreService(370): Chore service for: master/c85114ed5096:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T03:50:51,587 INFO [M:0;c85114ed5096:38561 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T03:50:51,587 DEBUG [M:0;c85114ed5096:38561 {}] master.HMaster(1795): Stopping service threads 2024-11-22T03:50:51,587 INFO [M:0;c85114ed5096:38561 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T03:50:51,587 INFO [M:0;c85114ed5096:38561 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T03:50:51,588 INFO [M:0;c85114ed5096:38561 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T03:50:51,588 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T03:50:51,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T03:50:51,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T03:50:51,588 DEBUG [M:0;c85114ed5096:38561 {}] zookeeper.ZKUtil(347): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T03:50:51,588 WARN [M:0;c85114ed5096:38561 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T03:50:51,589 INFO [M:0;c85114ed5096:38561 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/.lastflushedseqids 2024-11-22T03:50:51,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741838_1014 (size=99) 2024-11-22T03:50:51,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741838_1014 (size=99) 2024-11-22T03:50:51,594 INFO [M:0;c85114ed5096:38561 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T03:50:51,595 INFO [M:0;c85114ed5096:38561 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T03:50:51,595 DEBUG [M:0;c85114ed5096:38561 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T03:50:51,595 INFO [M:0;c85114ed5096:38561 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:50:51,595 DEBUG [M:0;c85114ed5096:38561 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:50:51,595 DEBUG [M:0;c85114ed5096:38561 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T03:50:51,595 DEBUG [M:0;c85114ed5096:38561 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T03:50:51,595 INFO [M:0;c85114ed5096:38561 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-22T03:50:51,609 DEBUG [M:0;c85114ed5096:38561 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/061ece4da65346898fd82526b7ea66e2 is 82, key is hbase:meta,,1/info:regioninfo/1732247450853/Put/seqid=0 2024-11-22T03:50:51,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741839_1015 (size=5672) 2024-11-22T03:50:51,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741839_1015 (size=5672) 2024-11-22T03:50:51,615 INFO [M:0;c85114ed5096:38561 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/061ece4da65346898fd82526b7ea66e2 2024-11-22T03:50:51,637 DEBUG [M:0;c85114ed5096:38561 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/215d2fb49c884601b9c3c4d5d09dacfd is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732247450873/Put/seqid=0 2024-11-22T03:50:51,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741840_1016 (size=5275) 2024-11-22T03:50:51,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741840_1016 (size=5275) 2024-11-22T03:50:51,642 INFO [M:0;c85114ed5096:38561 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/215d2fb49c884601b9c3c4d5d09dacfd 2024-11-22T03:50:51,664 DEBUG [M:0;c85114ed5096:38561 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/59a348ac20f84b08b65616f94bcaa8e5 is 69, key is c85114ed5096,46661,1732247449561/rs:state/1732247450227/Put/seqid=0 2024-11-22T03:50:51,672 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741841_1017 (size=5156) 2024-11-22T03:50:51,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741841_1017 (size=5156) 2024-11-22T03:50:51,673 INFO [M:0;c85114ed5096:38561 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/59a348ac20f84b08b65616f94bcaa8e5 2024-11-22T03:50:51,686 INFO [RS:0;c85114ed5096:46661 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:50:51,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:50:51,686 INFO [RS:0;c85114ed5096:46661 {}] regionserver.HRegionServer(1031): Exiting; stopping=c85114ed5096,46661,1732247449561; zookeeper connection closed. 2024-11-22T03:50:51,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46661-0x100658dcc0f0001, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:50:51,687 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1b8da5c6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1b8da5c6 2024-11-22T03:50:51,687 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T03:50:51,699 DEBUG [M:0;c85114ed5096:38561 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/efd1e49df36d401c9083cd03c737a11e is 52, key is load_balancer_on/state:d/1732247450904/Put/seqid=0 2024-11-22T03:50:51,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741842_1018 (size=5056) 2024-11-22T03:50:51,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741842_1018 (size=5056) 2024-11-22T03:50:51,704 INFO [M:0;c85114ed5096:38561 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/efd1e49df36d401c9083cd03c737a11e 2024-11-22T03:50:51,710 DEBUG [M:0;c85114ed5096:38561 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/061ece4da65346898fd82526b7ea66e2 as hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/061ece4da65346898fd82526b7ea66e2 2024-11-22T03:50:51,715 INFO [M:0;c85114ed5096:38561 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/061ece4da65346898fd82526b7ea66e2, entries=8, sequenceid=29, filesize=5.5 K 2024-11-22T03:50:51,716 DEBUG [M:0;c85114ed5096:38561 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/215d2fb49c884601b9c3c4d5d09dacfd as hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/215d2fb49c884601b9c3c4d5d09dacfd 2024-11-22T03:50:51,720 INFO [M:0;c85114ed5096:38561 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/215d2fb49c884601b9c3c4d5d09dacfd, entries=3, sequenceid=29, filesize=5.2 K 2024-11-22T03:50:51,721 DEBUG [M:0;c85114ed5096:38561 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/59a348ac20f84b08b65616f94bcaa8e5 as hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/59a348ac20f84b08b65616f94bcaa8e5 2024-11-22T03:50:51,725 INFO [M:0;c85114ed5096:38561 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/59a348ac20f84b08b65616f94bcaa8e5, entries=1, sequenceid=29, filesize=5.0 K 2024-11-22T03:50:51,726 DEBUG [M:0;c85114ed5096:38561 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/efd1e49df36d401c9083cd03c737a11e as hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/efd1e49df36d401c9083cd03c737a11e 2024-11-22T03:50:51,732 INFO [M:0;c85114ed5096:38561 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35659/user/jenkins/test-data/7102fe66-4e8b-5f8f-9cc1-8cb55188e2f3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/efd1e49df36d401c9083cd03c737a11e, entries=1, sequenceid=29, filesize=4.9 K 2024-11-22T03:50:51,733 INFO [M:0;c85114ed5096:38561 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=29, compaction requested=false 2024-11-22T03:50:51,734 INFO [M:0;c85114ed5096:38561 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T03:50:51,734 DEBUG [M:0;c85114ed5096:38561 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732247451595Disabling compacts and flushes for region at 1732247451595Disabling writes for close at 1732247451595Obtaining lock to block concurrent updates at 1732247451595Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732247451595Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732247451595Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732247451596 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732247451596Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732247451608 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732247451608Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732247451619 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732247451637 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732247451637Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732247451645 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732247451663 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732247451663Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732247451678 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732247451698 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732247451698Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34f26d7c: reopening flushed file at 1732247451709 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ed589bb: reopening flushed file at 1732247451715 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1edb98ba: reopening flushed file at 1732247451720 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79193fd8: reopening flushed file at 1732247451725 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=29, compaction requested=false at 1732247451733 (+8 ms)Writing region close event to WAL at 1732247451734 (+1 ms)Closed at 1732247451734 2024-11-22T03:50:51,736 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:51,736 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:51,736 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:51,736 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:51,736 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T03:50:51,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39779 is added to blk_1073741830_1006 (size=10311) 2024-11-22T03:50:51,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38109 is added to blk_1073741830_1006 (size=10311) 2024-11-22T03:50:51,742 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
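The region close journal above annotates each step with an epoch-millis timestamp and a "(+N ms)" delta to the previous step. A small illustrative sketch of deriving those deltas is given below; the three step names and timestamps are copied from the journal, the rest is hypothetical.

import java.util.LinkedHashMap;
import java.util.Map;

public class CloseJournalDeltas {
  public static void main(String[] args) {
    // Step -> epoch millis, in journal order.
    Map<String, Long> steps = new LinkedHashMap<>();
    steps.put("Waiting for close lock at", 1732247451595L);
    steps.put("Flushing stores at", 1732247451596L);
    steps.put("Flushing info: appending metadata at", 1732247451608L);

    long prev = -1;
    for (Map.Entry<String, Long> e : steps.entrySet()) {
      long ts = e.getValue();
      String delta = prev < 0 ? "" : " (+" + (ts - prev) + " ms)";
      System.out.println(e.getKey() + " " + ts + delta);  // prints +1 ms, then +12 ms
      prev = ts;
    }
  }
}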
2024-11-22T03:50:51,742 INFO [M:0;c85114ed5096:38561 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T03:50:51,742 INFO [M:0;c85114ed5096:38561 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38561 2024-11-22T03:50:51,742 INFO [M:0;c85114ed5096:38561 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T03:50:51,844 INFO [M:0;c85114ed5096:38561 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T03:50:51,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:50:51,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38561-0x100658dcc0f0000, quorum=127.0.0.1:52875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T03:50:51,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a047db8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:50:51,851 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51c3fa60{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:50:51,851 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:50:51,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6abd1419{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:50:51,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7546f00d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/hadoop.log.dir/,STOPPED} 2024-11-22T03:50:51,853 WARN [BP-249914748-172.17.0.2-1732247448959 heartbeating to localhost/127.0.0.1:35659 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:50:51,853 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:50:51,853 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:50:51,853 WARN [BP-249914748-172.17.0.2-1732247448959 heartbeating to localhost/127.0.0.1:35659 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-249914748-172.17.0.2-1732247448959 (Datanode Uuid d2363938-d921-435c-b7aa-14d012a1afeb) service to localhost/127.0.0.1:35659 2024-11-22T03:50:51,853 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/cluster_72a55cff-9cdc-b546-084c-7b1953f83103/data/data3/current/BP-249914748-172.17.0.2-1732247448959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:50:51,853 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/cluster_72a55cff-9cdc-b546-084c-7b1953f83103/data/data4/current/BP-249914748-172.17.0.2-1732247448959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:50:51,854 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:50:51,862 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e32dd62{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T03:50:51,863 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6b506c4a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:50:51,863 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:50:51,863 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e0377f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:50:51,863 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@351231b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/hadoop.log.dir/,STOPPED} 2024-11-22T03:50:51,864 WARN [BP-249914748-172.17.0.2-1732247448959 heartbeating to localhost/127.0.0.1:35659 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T03:50:51,864 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T03:50:51,864 WARN [BP-249914748-172.17.0.2-1732247448959 heartbeating to localhost/127.0.0.1:35659 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-249914748-172.17.0.2-1732247448959 (Datanode Uuid 5f403b81-ab7f-4f1f-b2d8-6572218da7d6) service to localhost/127.0.0.1:35659 2024-11-22T03:50:51,864 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T03:50:51,865 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/cluster_72a55cff-9cdc-b546-084c-7b1953f83103/data/data1/current/BP-249914748-172.17.0.2-1732247448959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:50:51,865 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/cluster_72a55cff-9cdc-b546-084c-7b1953f83103/data/data2/current/BP-249914748-172.17.0.2-1732247448959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T03:50:51,865 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T03:50:51,871 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@50f1fb65{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T03:50:51,871 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a8e8c4d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T03:50:51,871 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T03:50:51,871 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ea4b83c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T03:50:51,872 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7da51f5c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/19f73779-9cb0-0f03-4f21-a28d44962294/hadoop.log.dir/,STOPPED} 2024-11-22T03:50:51,878 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T03:50:51,897 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T03:50:51,907 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 230) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35659 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:35659
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35659
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
  app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:35659 from jenkins.hfs.7
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
  app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-42-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-3
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:35659
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:35659 from jenkins
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
  app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: HMaster-EventLoopGroup-16-2
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35659
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
  app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:35659 from jenkins
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
  app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-42-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-1
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

 - Thread LEAK? -, OpenFileDescriptor=538 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=236 (was 222) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3550 (was 2749) - AvailableMemoryMB LEAK? -
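The dump above is the test harness's end-of-test resource accounting: every thread still alive after teardown is listed with its stack, and counters such as open file descriptors and available memory are compared against the values captured before the test (the "(was ...)" figures) so that regressions can be flagged as potential leaks. As a minimal sketch of that before/after technique, built only on plain JDK APIs and assuming nothing about the actual HBase ResourceChecker implementation, a listener could look roughly like this; the class name ResourceSnapshot and its methods are hypothetical:

// Illustrative sketch only (hypothetical ResourceSnapshot class, not the HBase ResourceChecker):
// snapshot the live threads before a test, re-check after teardown, and print any
// survivors with their stacks plus a simple before/after thread-count comparison.
import java.lang.management.ManagementFactory;
import java.util.Map;
import java.util.Set;

public class ResourceSnapshot {
  private final Set<Thread> threadsBefore;

  public ResourceSnapshot() {
    // Thread.getAllStackTraces() returns a point-in-time copy, so this key set is stable.
    this.threadsBefore = Thread.getAllStackTraces().keySet();
  }

  /** Call after test teardown; lists threads that did not exist before the test. */
  public void report() {
    Map<Thread, StackTraceElement[]> after = Thread.getAllStackTraces();
    for (Map.Entry<Thread, StackTraceElement[]> entry : after.entrySet()) {
      Thread t = entry.getKey();
      if (t.isAlive() && !threadsBefore.contains(t)) {
        System.out.println("Potentially hanging thread: " + t.getName());
        for (StackTraceElement frame : entry.getValue()) {
          System.out.println("  " + frame);
        }
      }
    }
    int before = threadsBefore.size();
    int now = after.size();
    System.out.println("Thread=" + now + " (was " + before + ")"
        + (now > before ? " - Thread LEAK? -" : ""));
    // Load average is available through the OS MXBean; file-descriptor and process
    // counts are platform-specific and omitted from this sketch.
    System.out.println("SystemLoadAverage="
        + ManagementFactory.getOperatingSystemMXBean().getSystemLoadAverage());
  }
}

Wired into a JUnit run listener (snapshot at test start, report() at test end), this reproduces the shape of the output above; the remaining counters in the summary line (OpenFileDescriptor, MaxFileDescriptor, ProcessCount, AvailableMemoryMB) are evidently produced by the same before/after differencing, which is where their "(was ...)" values come from.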