2024-11-12 18:28:24,950 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-12 18:28:24,965 main DEBUG Took 0.012719 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-12 18:28:24,966 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-12 18:28:24,966 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-12 18:28:24,967 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-12 18:28:24,969 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:28:24,976 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-12 18:28:24,989 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:28:24,990 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:28:24,991 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:28:24,991 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:28:24,991 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:28:24,992 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:28:24,992 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:28:24,993 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:28:24,993 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:28:24,993 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:28:24,994 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:28:24,994 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:28:24,994 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:28:24,995 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-12 18:28:24,995 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:28:24,995 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:28:24,996 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:28:24,996 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:28:24,997 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:28:24,997 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:28:24,997 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:28:24,997 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:28:24,998 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:28:24,998 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 18:28:24,999 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:28:24,999 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-12 18:28:25,000 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 18:28:25,002 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-12 18:28:25,003 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-12 18:28:25,004 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-12 18:28:25,005 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-12 18:28:25,005 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-12 18:28:25,014 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-12 18:28:25,017 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-12 18:28:25,019 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-12 18:28:25,019 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-12 18:28:25,019 main DEBUG createAppenders(={Console}) 2024-11-12 18:28:25,020 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-12 18:28:25,020 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-12 18:28:25,021 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-12 18:28:25,021 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-12 18:28:25,021 main DEBUG OutputStream closed 2024-11-12 18:28:25,022 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-12 18:28:25,022 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-12 18:28:25,022 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-12 18:28:25,090 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-12 18:28:25,093 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-12 18:28:25,094 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-12 18:28:25,094 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-12 18:28:25,095 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-12 18:28:25,095 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-12 18:28:25,096 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-12 18:28:25,096 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-12 18:28:25,096 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-12 18:28:25,096 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-12 18:28:25,097 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-12 18:28:25,097 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-12 18:28:25,097 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-12 18:28:25,098 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-12 18:28:25,098 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-12 18:28:25,098 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-12 18:28:25,098 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-12 18:28:25,099 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-12 18:28:25,101 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-12 18:28:25,102 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-12 18:28:25,102 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-12 18:28:25,103 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-12T18:28:25,343 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403 2024-11-12 18:28:25,346 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-12 18:28:25,346 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
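[Editor's sketch] The DEBUG lines above trace Log4j2 assembling the test logging configuration from the log4j2.properties bundled in the hbase-logging tests jar: a console appender writing to SYSTEM_ERR with the "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n" pattern, a root logger at INFO routed to that appender, and per-package level overrides (for example org.apache.hadoop at WARN, org.apache.hadoop.hbase at DEBUG, org.apache.zookeeper at ERROR). The following is only an illustrative, roughly equivalent configuration built with Log4j2's ConfigurationBuilder API; it uses the stock Console appender instead of HBase's internal HBaseTestAppender and is an assumption-based reconstruction, not the actual properties file.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;
    import org.apache.logging.log4j.core.config.builder.api.AppenderComponentBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
    import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

    public final class TestLoggingConfigSketch {
      public static void main(String[] args) {
        ConfigurationBuilder<BuiltConfiguration> builder =
            ConfigurationBuilderFactory.newConfigurationBuilder();
        // Console appender to stderr with the pattern seen in the PatternLayout$Builder entry above.
        AppenderComponentBuilder console = builder.newAppender("Console", "Console")
            .addAttribute("target", "SYSTEM_ERR")
            .add(builder.newLayout("PatternLayout")
                .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"));
        builder.add(console);
        // A few of the per-package levels reported by the LoggerConfig$Builder entries.
        builder.add(builder.newLogger("org.apache.hadoop", Level.WARN));
        builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
        builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));
        // Root logger routes INFO and above to the Console appender ("INFO,Console" above).
        builder.add(builder.newRootLogger(Level.INFO).add(builder.newAppenderRef("Console")));
        Configurator.initialize(builder.build());
      }
    }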
2024-11-12T18:28:25,356 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-12T18:28:25,391 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=213, ProcessCount=11, AvailableMemoryMB=6866 2024-11-12T18:28:25,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-12T18:28:25,414 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/cluster_78433112-c5ed-e525-2622-27a7897f093a, deleteOnExit=true 2024-11-12T18:28:25,415 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-12T18:28:25,416 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/test.cache.data in system properties and HBase conf 2024-11-12T18:28:25,417 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/hadoop.tmp.dir in system properties and HBase conf 2024-11-12T18:28:25,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/hadoop.log.dir in system properties and HBase conf 2024-11-12T18:28:25,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-12T18:28:25,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-12T18:28:25,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-12T18:28:25,510 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-12T18:28:25,597 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-12T18:28:25,600 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:28:25,601 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:28:25,601 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-12T18:28:25,602 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:28:25,602 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-12T18:28:25,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-12T18:28:25,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:28:25,604 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:28:25,604 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-12T18:28:25,604 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/nfs.dump.dir in system properties and HBase conf 2024-11-12T18:28:25,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/java.io.tmpdir in system properties and HBase conf 2024-11-12T18:28:25,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:28:25,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-12T18:28:25,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-12T18:28:26,091 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-12T18:28:26,429 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-12T18:28:26,505 INFO [Time-limited test {}] log.Log(170): Logging initialized @2265ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-12T18:28:26,581 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:28:26,648 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:28:26,668 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:28:26,668 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:28:26,670 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:28:26,682 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:28:26,685 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d3e2ff3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:28:26,686 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@297967b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:28:26,920 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@439445db{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/java.io.tmpdir/jetty-localhost-42257-hadoop-hdfs-3_4_1-tests_jar-_-any-7669214466173102372/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:28:26,927 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7d712b6{HTTP/1.1, (http/1.1)}{localhost:42257} 2024-11-12T18:28:26,928 INFO [Time-limited test {}] server.Server(415): Started @2689ms 2024-11-12T18:28:26,958 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-12T18:28:27,320 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:28:27,327 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:28:27,329 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:28:27,329 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:28:27,329 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:28:27,330 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27d2dfff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:28:27,331 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@489f2f9e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:28:27,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76a77789{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/java.io.tmpdir/jetty-localhost-43951-hadoop-hdfs-3_4_1-tests_jar-_-any-4760987181326202718/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:28:27,453 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@32415a66{HTTP/1.1, (http/1.1)}{localhost:43951} 2024-11-12T18:28:27,454 INFO [Time-limited test {}] server.Server(415): Started @3215ms 2024-11-12T18:28:27,517 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:28:27,650 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:28:27,658 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:28:27,659 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:28:27,659 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:28:27,659 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:28:27,660 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f1be96f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:28:27,661 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4450986c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:28:27,798 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@12a6bd7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/java.io.tmpdir/jetty-localhost-46167-hadoop-hdfs-3_4_1-tests_jar-_-any-14860824836588078946/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:28:27,798 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b07a016{HTTP/1.1, (http/1.1)}{localhost:46167} 2024-11-12T18:28:27,799 INFO [Time-limited test {}] server.Server(415): Started @3560ms 2024-11-12T18:28:27,801 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
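[Editor's sketch] The "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, ... numDataNodes=2, ... numZkServers=1 ...}" entry above, followed by the HDFS and Jetty startup that ends here, is what HBaseTestingUtil produces when a test brings up one master, one regionserver, two datanodes and one ZooKeeper. Below is a minimal sketch of how a test might request exactly that, assuming the HBaseTestingUtil and StartMiniClusterOption classes named in the log; the builder method names are inferred from the option fields printed above and are not taken from this test's source.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterStartupSketch {
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      public static void main(String[] args) throws Exception {
        // Mirrors the StartMiniClusterOption printed by HBaseTestingUtil(805) above.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        TEST_UTIL.startMiniCluster(option);   // starts DFS, ZooKeeper, master and regionserver
        try {
          // ... run test logic against TEST_UTIL.getConnection() ...
        } finally {
          TEST_UTIL.shutdownMiniCluster();    // tears everything down and removes the test dirs
        }
      }
    }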
2024-11-12T18:28:27,971 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/cluster_78433112-c5ed-e525-2622-27a7897f093a/data/data2/current/BP-997071308-172.17.0.3-1731436106189/current, will proceed with Du for space computation calculation, 2024-11-12T18:28:27,971 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/cluster_78433112-c5ed-e525-2622-27a7897f093a/data/data1/current/BP-997071308-172.17.0.3-1731436106189/current, will proceed with Du for space computation calculation, 2024-11-12T18:28:27,971 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/cluster_78433112-c5ed-e525-2622-27a7897f093a/data/data4/current/BP-997071308-172.17.0.3-1731436106189/current, will proceed with Du for space computation calculation, 2024-11-12T18:28:27,971 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/cluster_78433112-c5ed-e525-2622-27a7897f093a/data/data3/current/BP-997071308-172.17.0.3-1731436106189/current, will proceed with Du for space computation calculation, 2024-11-12T18:28:28,019 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T18:28:28,019 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T18:28:28,095 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6df55ca4eff34202 with lease ID 0xff01dc863775c267: Processing first storage report for DS-97736fff-e5f9-49fb-8079-737379e7c0ec from datanode DatanodeRegistration(127.0.0.1:36157, datanodeUuid=e993e43f-8a71-4985-8469-fd290cb9df97, infoPort=33635, infoSecurePort=0, ipcPort=44029, storageInfo=lv=-57;cid=testClusterID;nsid=146582482;c=1731436106189) 2024-11-12T18:28:28,096 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6df55ca4eff34202 with lease ID 0xff01dc863775c267: from storage DS-97736fff-e5f9-49fb-8079-737379e7c0ec node DatanodeRegistration(127.0.0.1:36157, datanodeUuid=e993e43f-8a71-4985-8469-fd290cb9df97, infoPort=33635, infoSecurePort=0, ipcPort=44029, storageInfo=lv=-57;cid=testClusterID;nsid=146582482;c=1731436106189), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-12T18:28:28,097 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5724f9973930fa2b with lease ID 0xff01dc863775c266: Processing first storage report for DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9 from datanode DatanodeRegistration(127.0.0.1:44269, datanodeUuid=d65c5f1a-bcab-4896-86d3-85e6c7d57584, infoPort=37551, infoSecurePort=0, ipcPort=36219, storageInfo=lv=-57;cid=testClusterID;nsid=146582482;c=1731436106189) 2024-11-12T18:28:28,097 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5724f9973930fa2b with lease ID 0xff01dc863775c266: from storage DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9 node DatanodeRegistration(127.0.0.1:44269, datanodeUuid=d65c5f1a-bcab-4896-86d3-85e6c7d57584, infoPort=37551, infoSecurePort=0, ipcPort=36219, storageInfo=lv=-57;cid=testClusterID;nsid=146582482;c=1731436106189), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-12T18:28:28,097 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6df55ca4eff34202 with lease ID 0xff01dc863775c267: Processing first storage report for DS-42b62907-1374-4494-8581-2ff0a1b075f6 from datanode DatanodeRegistration(127.0.0.1:36157, datanodeUuid=e993e43f-8a71-4985-8469-fd290cb9df97, infoPort=33635, infoSecurePort=0, ipcPort=44029, storageInfo=lv=-57;cid=testClusterID;nsid=146582482;c=1731436106189) 2024-11-12T18:28:28,097 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6df55ca4eff34202 with lease ID 0xff01dc863775c267: from storage DS-42b62907-1374-4494-8581-2ff0a1b075f6 node DatanodeRegistration(127.0.0.1:36157, datanodeUuid=e993e43f-8a71-4985-8469-fd290cb9df97, infoPort=33635, infoSecurePort=0, ipcPort=44029, storageInfo=lv=-57;cid=testClusterID;nsid=146582482;c=1731436106189), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:28:28,098 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5724f9973930fa2b with lease ID 0xff01dc863775c266: Processing first storage report for DS-affd7893-38e9-4a50-97e9-001d7b563c95 from datanode DatanodeRegistration(127.0.0.1:44269, datanodeUuid=d65c5f1a-bcab-4896-86d3-85e6c7d57584, infoPort=37551, infoSecurePort=0, ipcPort=36219, storageInfo=lv=-57;cid=testClusterID;nsid=146582482;c=1731436106189) 2024-11-12T18:28:28,098 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x5724f9973930fa2b with lease ID 0xff01dc863775c266: from storage DS-affd7893-38e9-4a50-97e9-001d7b563c95 node DatanodeRegistration(127.0.0.1:44269, datanodeUuid=d65c5f1a-bcab-4896-86d3-85e6c7d57584, infoPort=37551, infoSecurePort=0, ipcPort=36219, storageInfo=lv=-57;cid=testClusterID;nsid=146582482;c=1731436106189), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-12T18:28:28,205 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403 2024-11-12T18:28:28,293 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/cluster_78433112-c5ed-e525-2622-27a7897f093a/zookeeper_0, clientPort=61753, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/cluster_78433112-c5ed-e525-2622-27a7897f093a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/cluster_78433112-c5ed-e525-2622-27a7897f093a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-12T18:28:28,307 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61753 2024-11-12T18:28:28,320 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:28:28,325 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:28:28,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:28:28,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:28:28,986 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e with version=8 2024-11-12T18:28:28,986 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/hbase-staging 2024-11-12T18:28:29,085 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-12T18:28:29,350 INFO [Time-limited test {}] client.ConnectionUtils(128): master/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:28:29,362 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:28:29,362 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:28:29,367 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:28:29,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:28:29,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:28:29,527 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-12T18:28:29,590 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-12T18:28:29,603 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-12T18:28:29,609 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:28:29,648 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 17080 (auto-detected) 2024-11-12T18:28:29,650 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-12T18:28:29,676 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41261 2024-11-12T18:28:29,707 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41261 connecting to ZooKeeper ensemble=127.0.0.1:61753 2024-11-12T18:28:29,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:412610x0, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:28:29,754 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41261-0x1003540648b0000 connected 2024-11-12T18:28:29,785 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:28:29,788 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:28:29,802 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:28:29,807 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e, hbase.cluster.distributed=false 2024-11-12T18:28:29,833 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:28:29,838 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41261 2024-11-12T18:28:29,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41261 2024-11-12T18:28:29,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41261 2024-11-12T18:28:29,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41261 2024-11-12T18:28:29,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41261 2024-11-12T18:28:29,965 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:28:29,968 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:28:29,968 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:28:29,968 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:28:29,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:28:29,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:28:29,973 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T18:28:29,976 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:28:29,977 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38061 2024-11-12T18:28:29,979 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38061 connecting to ZooKeeper ensemble=127.0.0.1:61753 2024-11-12T18:28:29,980 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:28:29,986 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:28:29,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:380610x0, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:28:29,996 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:380610x0, quorum=127.0.0.1:61753, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:28:30,001 INFO [Time-limited 
test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T18:28:30,001 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38061-0x1003540648b0001 connected 2024-11-12T18:28:30,010 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T18:28:30,012 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T18:28:30,017 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:28:30,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38061 2024-11-12T18:28:30,020 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38061 2024-11-12T18:28:30,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38061 2024-11-12T18:28:30,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38061 2024-11-12T18:28:30,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38061 2024-11-12T18:28:30,040 DEBUG [M:0;9911683f163c:41261 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9911683f163c:41261 2024-11-12T18:28:30,041 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/9911683f163c,41261,1731436109143 2024-11-12T18:28:30,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:28:30,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:28:30,052 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9911683f163c,41261,1731436109143 2024-11-12T18:28:30,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T18:28:30,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:28:30,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-12T18:28:30,077 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T18:28:30,080 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9911683f163c,41261,1731436109143 from backup master directory 2024-11-12T18:28:30,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:28:30,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9911683f163c,41261,1731436109143 2024-11-12T18:28:30,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:28:30,088 WARN [master/9911683f163c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T18:28:30,088 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9911683f163c,41261,1731436109143 2024-11-12T18:28:30,090 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-12T18:28:30,091 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-12T18:28:30,156 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/hbase.id] with ID: 1d8bc21f-351a-4840-ad84-e24c02d4a7a8 2024-11-12T18:28:30,156 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/.tmp/hbase.id 2024-11-12T18:28:30,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:28:30,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:28:30,176 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/.tmp/hbase.id]:[hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/hbase.id] 2024-11-12T18:28:30,228 INFO [master/9911683f163c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:28:30,233 INFO [master/9911683f163c:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-12T18:28:30,255 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 19ms. 2024-11-12T18:28:30,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:28:30,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:28:30,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:28:30,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:28:30,298 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:28:30,300 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T18:28:30,306 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:28:30,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:28:30,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:28:30,367 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store 2024-11-12T18:28:30,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:28:30,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:28:30,400 INFO [master/9911683f163c:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-12T18:28:30,403 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:28:30,405 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:28:30,405 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:28:30,405 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:28:30,407 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:28:30,408 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:28:30,408 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
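[Editor's sketch] The HRegion(7590)/HRegion(898) entries above show the master bootstrapping its local 'master:store' region from a descriptor with four column families (info, proc, rs, state); the 'info' family is kept in memory with three versions, ROW_INDEX_V1 data block encoding, a ROWCOL bloom filter and an 8 KB block size. Purely to illustrate how such a family is expressed with the public client API (this particular descriptor is built internally by MasterRegion, not by user code), a hedged sketch for the 'info' family:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        // Attributes copied from the 'info' family in the descriptor logged above.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8192)
                .build())
            // 'proc', 'rs' and 'state' use the defaults shown above (1 version, 64 KB blocks, ROW bloom).
            .build();
      }
    }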
2024-11-12T18:28:30,409 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436110405Disabling compacts and flushes for region at 1731436110405Disabling writes for close at 1731436110408 (+3 ms)Writing region close event to WAL at 1731436110408Closed at 1731436110408 2024-11-12T18:28:30,411 WARN [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/.initializing 2024-11-12T18:28:30,412 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/WALs/9911683f163c,41261,1731436109143 2024-11-12T18:28:30,436 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C41261%2C1731436109143, suffix=, logDir=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/WALs/9911683f163c,41261,1731436109143, archiveDir=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/oldWALs, maxLogs=10 2024-11-12T18:28:30,447 INFO [master/9911683f163c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C41261%2C1731436109143.1731436110442 2024-11-12T18:28:30,469 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/WALs/9911683f163c,41261,1731436109143/9911683f163c%2C41261%2C1731436109143.1731436110442 2024-11-12T18:28:30,479 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37551:37551),(127.0.0.1/127.0.0.1:33635:33635)] 2024-11-12T18:28:30,481 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:28:30,482 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:28:30,486 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:28:30,487 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:28:30,531 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:28:30,560 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T18:28:30,565 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:28:30,568 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:28:30,569 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:28:30,572 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T18:28:30,572 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:28:30,574 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:28:30,574 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:28:30,577 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T18:28:30,577 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:28:30,579 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:28:30,579 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:28:30,582 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T18:28:30,582 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:28:30,583 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:28:30,584 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:28:30,588 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:28:30,589 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:28:30,595 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:28:30,595 DEBUG [master/9911683f163c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:28:30,600 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T18:28:30,603 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:28:30,608 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:28:30,609 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698766, jitterRate=-0.11147375404834747}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T18:28:30,619 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731436110503Initializing all the Stores at 1731436110506 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436110506Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436110507 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436110507Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436110507Cleaning up temporary data from old regions at 1731436110595 (+88 ms)Region opened successfully at 1731436110619 (+24 ms) 2024-11-12T18:28:30,621 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T18:28:30,661 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bb29b94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:28:30,695 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-12T18:28:30,708 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T18:28:30,708 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T18:28:30,713 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T18:28:30,714 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-12T18:28:30,720 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-12T18:28:30,720 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T18:28:30,750 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-12T18:28:30,760 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T18:28:30,762 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-12T18:28:30,765 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T18:28:30,767 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T18:28:30,769 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-12T18:28:30,772 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T18:28:30,776 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T18:28:30,778 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-12T18:28:30,780 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T18:28:30,781 DEBUG 
[master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T18:28:30,800 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T18:28:30,802 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T18:28:30,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:28:30,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:28:30,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:28:30,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:28:30,810 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=9911683f163c,41261,1731436109143, sessionid=0x1003540648b0000, setting cluster-up flag (Was=false) 2024-11-12T18:28:30,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:28:30,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:28:30,834 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T18:28:30,836 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,41261,1731436109143 2024-11-12T18:28:30,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:28:30,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:28:30,850 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T18:28:30,852 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,41261,1731436109143 2024-11-12T18:28:30,869 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-12T18:28:30,927 INFO [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(746): ClusterId : 1d8bc21f-351a-4840-ad84-e24c02d4a7a8 2024-11-12T18:28:30,930 DEBUG [RS:0;9911683f163c:38061 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T18:28:30,936 DEBUG [RS:0;9911683f163c:38061 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T18:28:30,936 DEBUG [RS:0;9911683f163c:38061 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T18:28:30,940 DEBUG [RS:0;9911683f163c:38061 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T18:28:30,940 DEBUG [RS:0;9911683f163c:38061 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2519dfdb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:28:30,956 DEBUG [RS:0;9911683f163c:38061 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9911683f163c:38061 2024-11-12T18:28:30,959 INFO [RS:0;9911683f163c:38061 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T18:28:30,959 INFO [RS:0;9911683f163c:38061 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T18:28:30,959 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-12T18:28:30,960 DEBUG [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-12T18:28:30,962 INFO [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,41261,1731436109143 with port=38061, startcode=1731436109919 2024-11-12T18:28:30,970 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-12T18:28:30,975 DEBUG [RS:0;9911683f163c:38061 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T18:28:30,978 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
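The BaseLoadBalancer/StochasticLoadBalancer settings reported just above (slop=0.2, maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000) are driven by configuration. A sketch follows; the stochastic-balancer key names are assumed from the usual defaults and are worth verifying against the StochasticLoadBalancer documentation for this HBase version.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class BalancerConfigSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Values mirror the "Loaded config" line above; key names are assumptions.
      conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1000000);
      conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
      conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
      conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30000);
      // slop=0.2 is the BaseLoadBalancer tolerance: roughly a 20% deviation from the
      // average region count per server is allowed before the balancer acts.
      conf.setFloat("hbase.regions.slop", 0.2f);
      System.out.println(conf.get("hbase.regions.slop"));
    }
  }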
2024-11-12T18:28:30,985 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9911683f163c,41261,1731436109143 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T18:28:30,993 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:28:30,993 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:28:30,993 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:28:30,993 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:28:30,993 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9911683f163c:0, corePoolSize=10, maxPoolSize=10 2024-11-12T18:28:30,993 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:28:30,993 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:28:30,994 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:28:30,997 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731436140997 2024-11-12T18:28:30,999 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T18:28:30,999 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:28:31,000 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-12T18:28:31,000 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T18:28:31,004 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T18:28:31,004 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T18:28:31,005 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T18:28:31,005 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T18:28:31,007 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:28:31,007 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T18:28:31,006 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
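Each "Chore ScheduledChore name=... is enabled." record corresponds to a ScheduledChore registered with a ChoreService. A minimal sketch with a hypothetical DemoCleaner chore; the real LogsCleaner, HFileCleaner and similar chores above follow the same shape.

  import org.apache.hadoop.hbase.ChoreService;
  import org.apache.hadoop.hbase.ScheduledChore;
  import org.apache.hadoop.hbase.Stoppable;

  public class ChoreSketch {
    // Hypothetical chore: runs its body once per period until stopped.
    static class DemoCleaner extends ScheduledChore {
      DemoCleaner(Stoppable stopper) {
        super("DemoCleaner", stopper, 1000); // name, stopper, period in ms
      }
      @Override protected void chore() {
        System.out.println("periodic cleanup pass");
      }
    }

    public static void main(String[] args) throws InterruptedException {
      // Minimal Stoppable so the chore can be constructed outside a real server.
      Stoppable stopper = new Stoppable() {
        private volatile boolean stopped;
        @Override public void stop(String why) { stopped = true; }
        @Override public boolean isStopped() { return stopped; }
      };

      ChoreService service = new ChoreService("demo");
      service.scheduleChore(new DemoCleaner(stopper)); // logged as "... is enabled."
      Thread.sleep(3000);
      service.shutdown();
    }
  }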
2024-11-12T18:28:31,020 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T18:28:31,021 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T18:28:31,022 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T18:28:31,033 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T18:28:31,034 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T18:28:31,041 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436111036,5,FailOnTimeoutGroup] 2024-11-12T18:28:31,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741831_1007 (size=1321) 2024-11-12T18:28:31,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741831_1007 (size=1321) 2024-11-12T18:28:31,047 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-12T18:28:31,048 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e 2024-11-12T18:28:31,048 DEBUG 
[master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436111041,5,FailOnTimeoutGroup] 2024-11-12T18:28:31,049 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:31,050 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-12T18:28:31,052 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:31,052 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:31,070 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42765, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T18:28:31,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:28:31,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:28:31,079 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41261 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9911683f163c,38061,1731436109919 2024-11-12T18:28:31,080 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:28:31,083 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41261 {}] master.ServerManager(517): Registering regionserver=9911683f163c,38061,1731436109919 2024-11-12T18:28:31,083 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:28:31,087 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:28:31,087 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:28:31,088 
INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:28:31,089 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:28:31,091 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:28:31,092 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:28:31,093 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:28:31,093 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:28:31,096 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:28:31,096 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:28:31,097 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:28:31,097 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:28:31,101 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:28:31,101 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:28:31,102 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:28:31,102 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:28:31,103 DEBUG [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e 2024-11-12T18:28:31,103 DEBUG [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36309 2024-11-12T18:28:31,103 DEBUG [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T18:28:31,104 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740 2024-11-12T18:28:31,104 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740 2024-11-12T18:28:31,108 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:28:31,108 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:28:31,109 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T18:28:31,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:28:31,112 DEBUG [RS:0;9911683f163c:38061 {}] zookeeper.ZKUtil(111): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9911683f163c,38061,1731436109919 2024-11-12T18:28:31,112 WARN [RS:0;9911683f163c:38061 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
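The FlushLargeStoresPolicy records above note that hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, so the per-family flush lower bound falls back to the region's memstore flush heap size divided by the number of families (32 MB and 16 MB in the two records above). A sketch of setting both values explicitly; only the behaviour described in those log lines is assumed.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class FlushPolicySketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Region-wide memstore flush threshold (128 MB here, as in the
      // "Injected flushSize=134217728" line earlier in this log).
      conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
      // Explicit per-column-family lower bound; when unset, HBase derives it
      // from the flush heap size / number of families, as logged above.
      conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 32L * 1024 * 1024);
      System.out.println(conf.getLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", -1));
    }
  }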
2024-11-12T18:28:31,112 INFO [RS:0;9911683f163c:38061 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:28:31,113 DEBUG [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919 2024-11-12T18:28:31,113 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:28:31,116 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9911683f163c,38061,1731436109919] 2024-11-12T18:28:31,123 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:28:31,124 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846718, jitterRate=0.07665856182575226}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T18:28:31,127 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731436111080Initializing all the Stores at 1731436111082 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436111082Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436111083 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436111083Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436111083Cleaning up temporary data from old regions at 1731436111108 (+25 ms)Region opened successfully at 1731436111127 (+19 ms) 2024-11-12T18:28:31,128 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:28:31,128 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:28:31,128 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:28:31,128 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired 
close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:28:31,128 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:28:31,130 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:28:31,130 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436111128Disabling compacts and flushes for region at 1731436111128Disabling writes for close at 1731436111128Writing region close event to WAL at 1731436111129 (+1 ms)Closed at 1731436111130 (+1 ms) 2024-11-12T18:28:31,133 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:28:31,133 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-12T18:28:31,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-12T18:28:31,147 INFO [RS:0;9911683f163c:38061 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T18:28:31,151 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:28:31,155 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-12T18:28:31,163 INFO [RS:0;9911683f163c:38061 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T18:28:31,168 INFO [RS:0;9911683f163c:38061 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T18:28:31,168 INFO [RS:0;9911683f163c:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:31,169 INFO [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T18:28:31,176 INFO [RS:0;9911683f163c:38061 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T18:28:31,178 INFO [RS:0;9911683f163c:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
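The MemStoreFlusher and PressureAwareCompactionThroughputController records above (global memstore limit 880 M with a 836 M low-water mark; compaction throughput bounded between 50 and 100 MB/s) come from configuration. A sketch follows; the throughput and lower-limit key names are assumed from the usual defaults and should be double-checked for this version.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class ThroughputConfigSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Global memstore limit as a fraction of the JVM heap; the 880 M figure above
      // is this fraction applied to the test JVM's heap.
      conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
      // Low-water mark as a fraction of the limit (836 M ~= 0.95 * 880 M above); key assumed.
      conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
      // Pressure-aware compaction throughput bounds reported above; keys assumed.
      conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
      conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
      System.out.println(conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f));
    }
  }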
2024-11-12T18:28:31,178 DEBUG [RS:0;9911683f163c:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:28:31,178 DEBUG [RS:0;9911683f163c:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:28:31,179 DEBUG [RS:0;9911683f163c:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:28:31,179 DEBUG [RS:0;9911683f163c:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:28:31,179 DEBUG [RS:0;9911683f163c:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:28:31,179 DEBUG [RS:0;9911683f163c:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:28:31,179 DEBUG [RS:0;9911683f163c:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:28:31,180 DEBUG [RS:0;9911683f163c:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:28:31,180 DEBUG [RS:0;9911683f163c:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:28:31,180 DEBUG [RS:0;9911683f163c:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:28:31,180 DEBUG [RS:0;9911683f163c:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:28:31,181 DEBUG [RS:0;9911683f163c:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:28:31,181 DEBUG [RS:0;9911683f163c:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:28:31,181 DEBUG [RS:0;9911683f163c:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:28:31,182 INFO [RS:0;9911683f163c:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:31,182 INFO [RS:0;9911683f163c:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:31,183 INFO [RS:0;9911683f163c:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:31,183 INFO [RS:0;9911683f163c:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
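Each RS_* executor listed above is a small per-operation thread pool identified by its corePoolSize/maxPoolSize. A generic java.util.concurrent illustration of what those two numbers mean; HBase's own executor.ExecutorService wrapper is not used here.

  import java.util.concurrent.LinkedBlockingQueue;
  import java.util.concurrent.ThreadPoolExecutor;
  import java.util.concurrent.TimeUnit;

  public class PoolSemanticsSketch {
    public static void main(String[] args) {
      // corePoolSize=1, maxPoolSize=1, like RS_OPEN_REGION above: one worker thread,
      // extra tasks queue behind it instead of spawning more threads.
      ThreadPoolExecutor pool = new ThreadPoolExecutor(
          1, 1,
          60, TimeUnit.SECONDS,          // keep-alive for threads above core size
          new LinkedBlockingQueue<>());
      pool.execute(() -> System.out.println("open-region style task"));
      pool.shutdown();
    }
  }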
2024-11-12T18:28:31,183 INFO [RS:0;9911683f163c:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:31,183 INFO [RS:0;9911683f163c:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,38061,1731436109919-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:28:31,209 INFO [RS:0;9911683f163c:38061 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T18:28:31,212 INFO [RS:0;9911683f163c:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,38061,1731436109919-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:31,212 INFO [RS:0;9911683f163c:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:31,213 INFO [RS:0;9911683f163c:38061 {}] regionserver.Replication(171): 9911683f163c,38061,1731436109919 started 2024-11-12T18:28:31,237 INFO [RS:0;9911683f163c:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:31,238 INFO [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(1482): Serving as 9911683f163c,38061,1731436109919, RpcServer on 9911683f163c/172.17.0.3:38061, sessionid=0x1003540648b0001 2024-11-12T18:28:31,239 DEBUG [RS:0;9911683f163c:38061 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T18:28:31,239 DEBUG [RS:0;9911683f163c:38061 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9911683f163c,38061,1731436109919 2024-11-12T18:28:31,239 DEBUG [RS:0;9911683f163c:38061 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,38061,1731436109919' 2024-11-12T18:28:31,239 DEBUG [RS:0;9911683f163c:38061 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T18:28:31,241 DEBUG [RS:0;9911683f163c:38061 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T18:28:31,242 DEBUG [RS:0;9911683f163c:38061 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T18:28:31,242 DEBUG [RS:0;9911683f163c:38061 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T18:28:31,242 DEBUG [RS:0;9911683f163c:38061 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9911683f163c,38061,1731436109919 2024-11-12T18:28:31,242 DEBUG [RS:0;9911683f163c:38061 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,38061,1731436109919' 2024-11-12T18:28:31,242 DEBUG [RS:0;9911683f163c:38061 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T18:28:31,243 DEBUG [RS:0;9911683f163c:38061 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T18:28:31,244 DEBUG [RS:0;9911683f163c:38061 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T18:28:31,244 INFO [RS:0;9911683f163c:38061 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T18:28:31,244 INFO [RS:0;9911683f163c:38061 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-12T18:28:31,306 WARN [9911683f163c:41261 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-12T18:28:31,354 INFO [RS:0;9911683f163c:38061 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C38061%2C1731436109919, suffix=, logDir=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919, archiveDir=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/oldWALs, maxLogs=32 2024-11-12T18:28:31,356 INFO [RS:0;9911683f163c:38061 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C38061%2C1731436109919.1731436111356 2024-11-12T18:28:31,367 INFO [RS:0;9911683f163c:38061 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436111356 2024-11-12T18:28:31,369 DEBUG [RS:0;9911683f163c:38061 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33635:33635),(127.0.0.1/127.0.0.1:37551:37551)] 2024-11-12T18:28:31,559 DEBUG [9911683f163c:41261 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-12T18:28:31,571 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9911683f163c,38061,1731436109919 2024-11-12T18:28:31,578 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,38061,1731436109919, state=OPENING 2024-11-12T18:28:31,587 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-12T18:28:31,592 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:28:31,592 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:28:31,593 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:28:31,593 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:28:31,594 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:28:31,596 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,38061,1731436109919}] 2024-11-12T18:28:31,773 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T18:28:31,776 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33453, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T18:28:31,787 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-12T18:28:31,788 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:28:31,792 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C38061%2C1731436109919.meta, suffix=.meta, logDir=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919, archiveDir=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/oldWALs, maxLogs=32 2024-11-12T18:28:31,794 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C38061%2C1731436109919.meta.1731436111794.meta 2024-11-12T18:28:31,803 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.meta.1731436111794.meta 2024-11-12T18:28:31,805 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37551:37551),(127.0.0.1/127.0.0.1:33635:33635)] 2024-11-12T18:28:31,807 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:28:31,809 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-12T18:28:31,813 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-12T18:28:31,820 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-12T18:28:31,827 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-12T18:28:31,828 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:28:31,829 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-12T18:28:31,829 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-12T18:28:31,833 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:28:31,835 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:28:31,835 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:28:31,836 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:28:31,836 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:28:31,838 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:28:31,838 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:28:31,839 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:28:31,839 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:28:31,841 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:28:31,841 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:28:31,842 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:28:31,842 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:28:31,844 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:28:31,844 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:28:31,845 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-12T18:28:31,845 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:28:31,847 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740 2024-11-12T18:28:31,850 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740 2024-11-12T18:28:31,853 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:28:31,853 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:28:31,854 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T18:28:31,858 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:28:31,860 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744210, jitterRate=-0.05368892848491669}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T18:28:31,860 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-12T18:28:31,862 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731436111830Writing region info on filesystem at 1731436111830Initializing all the Stores at 1731436111832 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436111832Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436111833 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436111833Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436111833Cleaning up temporary data from old regions at 1731436111853 (+20 ms)Running coprocessor post-open hooks at 1731436111860 (+7 ms)Region opened successfully at 1731436111862 (+2 ms) 2024-11-12T18:28:31,870 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731436111763 2024-11-12T18:28:31,882 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T18:28:31,883 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-12T18:28:31,884 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=9911683f163c,38061,1731436109919 2024-11-12T18:28:31,887 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,38061,1731436109919, state=OPEN 2024-11-12T18:28:31,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:28:31,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:28:31,892 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:28:31,893 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:28:31,893 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=9911683f163c,38061,1731436109919 2024-11-12T18:28:31,899 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T18:28:31,899 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,38061,1731436109919 in 297 msec 2024-11-12T18:28:31,906 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T18:28:31,907 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 760 msec 2024-11-12T18:28:31,909 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:28:31,909 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-12T18:28:31,933 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:28:31,934 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,38061,1731436109919, seqNum=-1] 2024-11-12T18:28:31,957 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:28:31,960 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47489, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:28:31,982 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0790 sec 2024-11-12T18:28:31,983 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731436111983, completionTime=-1 2024-11-12T18:28:31,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-12T18:28:31,986 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-12T18:28:32,015 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-12T18:28:32,016 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731436172016 2024-11-12T18:28:32,016 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731436232016 2024-11-12T18:28:32,016 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 30 msec 2024-11-12T18:28:32,019 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41261,1731436109143-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:32,019 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41261,1731436109143-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:32,019 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41261,1731436109143-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:32,021 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9911683f163c:41261, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T18:28:32,022 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:32,023 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-12T18:28:32,029 DEBUG [master/9911683f163c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-12T18:28:32,053 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.965sec 2024-11-12T18:28:32,055 INFO [master/9911683f163c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T18:28:32,056 INFO [master/9911683f163c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T18:28:32,057 INFO [master/9911683f163c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T18:28:32,058 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-12T18:28:32,058 INFO [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T18:28:32,059 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41261,1731436109143-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:28:32,059 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41261,1731436109143-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T18:28:32,068 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-12T18:28:32,070 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T18:28:32,070 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41261,1731436109143-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T18:28:32,142 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4acb3aad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:28:32,145 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-12T18:28:32,145 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-12T18:28:32,148 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 9911683f163c,41261,-1 for getting cluster id 2024-11-12T18:28:32,151 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-12T18:28:32,163 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '1d8bc21f-351a-4840-ad84-e24c02d4a7a8' 2024-11-12T18:28:32,167 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-12T18:28:32,167 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "1d8bc21f-351a-4840-ad84-e24c02d4a7a8" 2024-11-12T18:28:32,170 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5879d47e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:28:32,171 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9911683f163c,41261,-1] 2024-11-12T18:28:32,175 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-12T18:28:32,177 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:28:32,179 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41946, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-12T18:28:32,182 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a2d78f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:28:32,183 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:28:32,191 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,38061,1731436109919, seqNum=-1] 2024-11-12T18:28:32,191 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:28:32,194 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49792, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:28:32,217 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=9911683f163c,41261,1731436109143 2024-11-12T18:28:32,217 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:28:32,225 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-12T18:28:32,230 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-12T18:28:32,235 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 9911683f163c,41261,1731436109143 2024-11-12T18:28:32,258 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3a95d6f2 2024-11-12T18:28:32,259 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T18:28:32,263 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41952, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T18:28:32,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41261 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-12T18:28:32,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41261 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-12T18:28:32,271 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41261 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:28:32,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41261 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-12T18:28:32,282 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T18:28:32,285 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41261 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-12T18:28:32,286 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:28:32,293 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T18:28:32,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41261 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:28:32,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741835_1011 (size=389) 2024-11-12T18:28:32,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741835_1011 (size=389) 2024-11-12T18:28:32,337 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a92152dc3880e93df0e4ccb280879010, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e 2024-11-12T18:28:32,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741836_1012 (size=72) 2024-11-12T18:28:32,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741836_1012 (size=72) 2024-11-12T18:28:32,349 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:28:32,349 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing a92152dc3880e93df0e4ccb280879010, disabling compactions & flushes 2024-11-12T18:28:32,349 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. 2024-11-12T18:28:32,349 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. 2024-11-12T18:28:32,349 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. after waiting 0 ms 2024-11-12T18:28:32,349 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. 2024-11-12T18:28:32,349 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. 2024-11-12T18:28:32,349 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for a92152dc3880e93df0e4ccb280879010: Waiting for close lock at 1731436112349Disabling compacts and flushes for region at 1731436112349Disabling writes for close at 1731436112349Writing region close event to WAL at 1731436112349Closed at 1731436112349 2024-11-12T18:28:32,352 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T18:28:32,357 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731436112352"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731436112352"}]},"ts":"1731436112352"} 2024-11-12T18:28:32,363 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-12T18:28:32,365 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T18:28:32,368 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731436112366"}]},"ts":"1731436112366"} 2024-11-12T18:28:32,376 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-12T18:28:32,378 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a92152dc3880e93df0e4ccb280879010, ASSIGN}] 2024-11-12T18:28:32,381 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a92152dc3880e93df0e4ccb280879010, ASSIGN 2024-11-12T18:28:32,383 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a92152dc3880e93df0e4ccb280879010, ASSIGN; state=OFFLINE, location=9911683f163c,38061,1731436109919; forceNewPlan=false, retain=false 2024-11-12T18:28:32,535 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a92152dc3880e93df0e4ccb280879010, regionState=OPENING, regionLocation=9911683f163c,38061,1731436109919 2024-11-12T18:28:32,541 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a92152dc3880e93df0e4ccb280879010, ASSIGN because future has completed 2024-11-12T18:28:32,542 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a92152dc3880e93df0e4ccb280879010, server=9911683f163c,38061,1731436109919}] 2024-11-12T18:28:32,706 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. 
2024-11-12T18:28:32,706 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a92152dc3880e93df0e4ccb280879010, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:28:32,707 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling a92152dc3880e93df0e4ccb280879010 2024-11-12T18:28:32,707 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:28:32,707 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a92152dc3880e93df0e4ccb280879010 2024-11-12T18:28:32,707 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a92152dc3880e93df0e4ccb280879010 2024-11-12T18:28:32,710 INFO [StoreOpener-a92152dc3880e93df0e4ccb280879010-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a92152dc3880e93df0e4ccb280879010 2024-11-12T18:28:32,713 INFO [StoreOpener-a92152dc3880e93df0e4ccb280879010-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a92152dc3880e93df0e4ccb280879010 columnFamilyName info 2024-11-12T18:28:32,713 DEBUG [StoreOpener-a92152dc3880e93df0e4ccb280879010-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:28:32,714 INFO [StoreOpener-a92152dc3880e93df0e4ccb280879010-1 {}] regionserver.HStore(327): Store=a92152dc3880e93df0e4ccb280879010/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:28:32,715 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a92152dc3880e93df0e4ccb280879010 2024-11-12T18:28:32,716 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010 2024-11-12T18:28:32,717 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010 2024-11-12T18:28:32,717 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a92152dc3880e93df0e4ccb280879010 2024-11-12T18:28:32,717 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a92152dc3880e93df0e4ccb280879010 2024-11-12T18:28:32,720 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a92152dc3880e93df0e4ccb280879010 2024-11-12T18:28:32,723 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:28:32,724 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a92152dc3880e93df0e4ccb280879010; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817794, jitterRate=0.03987930715084076}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-12T18:28:32,724 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a92152dc3880e93df0e4ccb280879010 2024-11-12T18:28:32,725 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a92152dc3880e93df0e4ccb280879010: Running coprocessor pre-open hook at 1731436112707Writing region info on filesystem at 1731436112707Initializing all the Stores at 1731436112709 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436112709Cleaning up temporary data from old regions at 1731436112717 (+8 ms)Running coprocessor post-open hooks at 1731436112724 (+7 ms)Region opened successfully at 1731436112725 (+1 ms) 2024-11-12T18:28:32,727 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010., pid=6, masterSystemTime=1731436112699 2024-11-12T18:28:32,731 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. 2024-11-12T18:28:32,731 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. 2024-11-12T18:28:32,732 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a92152dc3880e93df0e4ccb280879010, regionState=OPEN, openSeqNum=2, regionLocation=9911683f163c,38061,1731436109919 2024-11-12T18:28:32,737 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a92152dc3880e93df0e4ccb280879010, server=9911683f163c,38061,1731436109919 because future has completed 2024-11-12T18:28:32,744 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-12T18:28:32,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a92152dc3880e93df0e4ccb280879010, server=9911683f163c,38061,1731436109919 in 197 msec 2024-11-12T18:28:32,749 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-12T18:28:32,749 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a92152dc3880e93df0e4ccb280879010, ASSIGN in 366 msec 2024-11-12T18:28:32,750 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T18:28:32,750 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731436112750"}]},"ts":"1731436112750"} 2024-11-12T18:28:32,754 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-12T18:28:32,756 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T18:28:32,759 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 483 msec 2024-11-12T18:28:37,331 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-12T18:28:37,376 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-12T18:28:37,377 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-12T18:28:39,586 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-12T18:28:39,586 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-12T18:28:39,588 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-12T18:28:39,588 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-12T18:28:39,589 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T18:28:39,589 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-12T18:28:39,589 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-12T18:28:39,589 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-12T18:28:42,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41261 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:28:42,308 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-12T18:28:42,311 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-12T18:28:42,318 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-12T18:28:42,318 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. 
2024-11-12T18:28:42,319 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C38061%2C1731436109919.1731436122319 2024-11-12T18:28:42,328 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:28:42,328 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:28:42,329 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:28:42,329 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:28:42,329 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:28:42,329 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436111356 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436122319 2024-11-12T18:28:42,331 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33635:33635),(127.0.0.1/127.0.0.1:37551:37551)] 2024-11-12T18:28:42,331 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436111356 is not closed yet, will try archiving it next time 2024-11-12T18:28:42,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741833_1009 (size=451) 2024-11-12T18:28:42,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741833_1009 (size=451) 2024-11-12T18:28:42,334 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436111356 to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/oldWALs/9911683f163c%2C38061%2C1731436109919.1731436111356 2024-11-12T18:28:42,340 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010., hostname=9911683f163c,38061,1731436109919, seqNum=2] 2024-11-12T18:28:54,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38061 {}] regionserver.HRegion(8855): Flush requested on a92152dc3880e93df0e4ccb280879010 2024-11-12T18:28:54,378 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a92152dc3880e93df0e4ccb280879010 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-12T18:28:54,451 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/6cc7fb9dfa9847c080249cdf01adccb6 is 1080, key is row0001/info:/1731436122342/Put/seqid=0 2024-11-12T18:28:54,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741838_1014 (size=12509) 2024-11-12T18:28:54,464 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741838_1014 (size=12509) 2024-11-12T18:28:54,464 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/6cc7fb9dfa9847c080249cdf01adccb6 2024-11-12T18:28:54,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/6cc7fb9dfa9847c080249cdf01adccb6 as hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/6cc7fb9dfa9847c080249cdf01adccb6 2024-11-12T18:28:54,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/6cc7fb9dfa9847c080249cdf01adccb6, entries=7, sequenceid=11, filesize=12.2 K 2024-11-12T18:28:54,530 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a92152dc3880e93df0e4ccb280879010 in 153ms, sequenceid=11, compaction requested=false 2024-11-12T18:28:54,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a92152dc3880e93df0e4ccb280879010: 2024-11-12T18:28:58,200 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
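Editor's note: the flush above turns ~7.36 KB of memstore data (7 cells of roughly 1 KB each, keys starting at row0001) into a 12.2 K HFile under .tmp and then commits it into the info store. The test presumably reaches this point by writing until its memstore flush size is crossed; the sketch below only illustrates the write side and forces the flush explicitly instead. The table and family names come from the log, while the class name, qualifier, and exact payload size are assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteAndFlush {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        byte[] family = Bytes.toBytes("info");   // column family seen in the flush entries
        byte[] value = new byte[1024];           // ~1 KiB payload, matching the ~1080-byte cells
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
          for (int i = 1; i <= 7; i++) {         // 7 cells ~= 7.36 KB, as in "entries=7, sequenceid=11"
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(family, Bytes.toBytes(""), value);
            table.put(put);
          }
          admin.flush(name);                     // force a memstore flush like the one recorded above
        }
      }
    }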
2024-11-12T18:29:02,386 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C38061%2C1731436109919.1731436142385 2024-11-12T18:29:02,594 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:02,595 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:02,595 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:02,595 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:02,595 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:02,595 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:02,596 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436122319 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436142385 2024-11-12T18:29:02,597 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33635:33635),(127.0.0.1/127.0.0.1:37551:37551)] 2024-11-12T18:29:02,597 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436122319 is not closed yet, will try archiving it next time 2024-11-12T18:29:02,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741837_1013 (size=12399) 2024-11-12T18:29:02,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741837_1013 (size=12399) 2024-11-12T18:29:02,801 INFO [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:05,005 INFO [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:07,209 INFO [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:09,413 INFO [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:09,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38061 {}] regionserver.HRegion(8855): Flush requested on a92152dc3880e93df0e4ccb280879010 2024-11-12T18:29:09,414 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a92152dc3880e93df0e4ccb280879010 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-12T18:29:09,616 INFO [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:09,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/4ee6131218874618a1a056ac6ebd5905 is 1080, key is row0008/info:/1731436136375/Put/seqid=0 2024-11-12T18:29:09,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741840_1016 (size=12509) 2024-11-12T18:29:09,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741840_1016 (size=12509) 2024-11-12T18:29:09,634 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/4ee6131218874618a1a056ac6ebd5905 2024-11-12T18:29:09,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/4ee6131218874618a1a056ac6ebd5905 as hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/4ee6131218874618a1a056ac6ebd5905 2024-11-12T18:29:09,658 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/4ee6131218874618a1a056ac6ebd5905, entries=7, sequenceid=21, filesize=12.2 K 2024-11-12T18:29:09,860 INFO [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:09,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a92152dc3880e93df0e4ccb280879010 in 
446ms, sequenceid=21, compaction requested=false 2024-11-12T18:29:09,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a92152dc3880e93df0e4ccb280879010: 2024-11-12T18:29:09,861 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-12T18:29:09,861 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:29:09,861 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/6cc7fb9dfa9847c080249cdf01adccb6 because midkey is the same as first or last row 2024-11-12T18:29:11,618 INFO [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:12,570 INFO [master/9911683f163c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-12T18:29:12,570 INFO [master/9911683f163c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-12T18:29:13,822 INFO [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:13,824 WARN [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:13,825 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 9911683f163c%2C38061%2C1731436109919:(num 1731436142385) roll requested 2024-11-12T18:29:13,825 INFO [regionserver/9911683f163c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C38061%2C1731436109919.1731436153825 2024-11-12T18:29:14,038 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:14,038 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:14,038 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:14,039 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:14,039 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:14,039 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
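Editor's note: the WARN above ("count=8, threshold=5") and the later ones of the form "time=5005 ms, threshold=5000 ms" show the two conditions under which the WAL asks to be rolled: too many individually slow syncs since the last check, or a single sync slow enough on its own. Below is a minimal standalone sketch of that decision, not the real implementation; the class name and the 100 ms per-sync cutoff are assumptions made for illustration, while the count limit (5) and the single-sync limit (5000 ms) are the values visible in the log. The real code also bounds the count to a time window and resets it on roll, which is omitted here:

    /** Illustrative tracker mirroring the two "request a log roll" conditions seen in this log. */
    public class SlowSyncRollTracker {
      private final long slowSyncMs;         // a sync slower than this counts as "slow" (assumed 100 ms)
      private final int slowSyncCountLimit;  // roll after more than this many slow syncs ("threshold=5")
      private final long rollOnSingleSyncMs; // roll immediately if one sync exceeds this ("threshold=5000 ms")
      private int slowSyncCount;

      public SlowSyncRollTracker(long slowSyncMs, int slowSyncCountLimit, long rollOnSingleSyncMs) {
        this.slowSyncMs = slowSyncMs;
        this.slowSyncCountLimit = slowSyncCountLimit;
        this.rollOnSingleSyncMs = rollOnSingleSyncMs;
      }

      /** Returns true when a log roll should be requested after the sync that just completed. */
      public boolean onSyncCompleted(long syncCostMs) {
        if (syncCostMs >= rollOnSingleSyncMs) {
          return true;                               // e.g. "Slow sync cost: 5005 ms" -> roll right away
        }
        if (syncCostMs >= slowSyncMs) {
          slowSyncCount++;                           // e.g. the repeated "Slow sync cost: 201 ms" entries
        }
        return slowSyncCount > slowSyncCountLimit;   // e.g. "count=8, threshold=5"
      }

      /** Called after a roll (or at each check interval) to start a fresh observation window. */
      public void reset() {
        slowSyncCount = 0;
      }
    }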
2024-11-12T18:29:14,039 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436142385 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436153825 2024-11-12T18:29:14,040 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37551:37551),(127.0.0.1/127.0.0.1:33635:33635)] 2024-11-12T18:29:14,040 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436142385 is not closed yet, will try archiving it next time 2024-11-12T18:29:14,040 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436122319 to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/oldWALs/9911683f163c%2C38061%2C1731436109919.1731436122319 2024-11-12T18:29:14,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741839_1015 (size=7739) 2024-11-12T18:29:14,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741839_1015 (size=7739) 2024-11-12T18:29:16,026 INFO [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK], DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK]] 2024-11-12T18:29:17,707 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a92152dc3880e93df0e4ccb280879010, had cached 0 bytes from a total of 25018 2024-11-12T18:29:18,230 INFO [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK], DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK]] 2024-11-12T18:29:20,434 INFO [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK], DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK]] 2024-11-12T18:29:22,639 INFO [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK], 
DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK]] 2024-11-12T18:29:24,641 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T18:29:24,641 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C38061%2C1731436109919.1731436164641 2024-11-12T18:29:28,200 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-12T18:29:29,650 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK], DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK]] 2024-11-12T18:29:29,652 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK], DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK]] 2024-11-12T18:29:29,652 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 9911683f163c%2C38061%2C1731436109919:(num 1731436164641) roll requested 2024-11-12T18:29:29,652 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:29,652 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:29,652 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:29,653 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:29,653 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:29,653 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436153825 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436164641 2024-11-12T18:29:29,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741841_1017 (size=4753) 2024-11-12T18:29:29,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741841_1017 (size=4753) 2024-11-12T18:29:29,658 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33635:33635),(127.0.0.1/127.0.0.1:37551:37551)] 2024-11-12T18:29:29,659 INFO [regionserver/9911683f163c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C38061%2C1731436109919.1731436169658 2024-11-12T18:29:34,662 INFO [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:34,662 WARN 
[FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:34,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38061 {}] regionserver.HRegion(8855): Flush requested on a92152dc3880e93df0e4ccb280879010 2024-11-12T18:29:34,662 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a92152dc3880e93df0e4ccb280879010 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-12T18:29:34,667 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:34,667 WARN [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:36,663 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T18:29:39,665 INFO [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:39,665 WARN [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:39,665 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:39,665 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:39,665 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:39,665 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:39,665 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:39,666 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436164641 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436169658 2024-11-12T18:29:39,668 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:33635:33635),(127.0.0.1/127.0.0.1:37551:37551)] 2024-11-12T18:29:39,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741842_1018 (size=1569) 2024-11-12T18:29:39,668 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436164641 is not closed yet, will try archiving it next time 2024-11-12T18:29:39,668 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 9911683f163c%2C38061%2C1731436109919:(num 1731436169658) roll requested 2024-11-12T18:29:39,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741842_1018 (size=1569) 2024-11-12T18:29:39,669 INFO [regionserver/9911683f163c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C38061%2C1731436109919.1731436179669 2024-11-12T18:29:39,671 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/bcc2fd95365149bead0213a6f62d37cc is 1080, key is row0015/info:/1731436151416/Put/seqid=0 2024-11-12T18:29:39,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741844_1020 (size=12509) 2024-11-12T18:29:39,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741844_1020 (size=12509) 2024-11-12T18:29:39,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/bcc2fd95365149bead0213a6f62d37cc 2024-11-12T18:29:39,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/bcc2fd95365149bead0213a6f62d37cc as hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/bcc2fd95365149bead0213a6f62d37cc 2024-11-12T18:29:39,706 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/bcc2fd95365149bead0213a6f62d37cc, entries=7, sequenceid=31, filesize=12.2 K 2024-11-12T18:29:44,686 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:44,686 WARN [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; 
time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:44,708 INFO [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:44,708 WARN [FSHLog-0-hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e-prefix:9911683f163c,38061,1731436109919 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36157,DS-97736fff-e5f9-49fb-8079-737379e7c0ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44269,DS-586b4bb4-3d62-4993-93f6-c0f2a21012c9,DISK]] 2024-11-12T18:29:44,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a92152dc3880e93df0e4ccb280879010 in 10046ms, sequenceid=31, compaction requested=true 2024-11-12T18:29:44,708 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:44,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a92152dc3880e93df0e4ccb280879010: 2024-11-12T18:29:44,709 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:44,709 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-12T18:29:44,709 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:29:44,709 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:44,709 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/6cc7fb9dfa9847c080249cdf01adccb6 because midkey is the same as first or last row 2024-11-12T18:29:44,709 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:44,709 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:44,709 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436169658 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436179669 2024-11-12T18:29:44,710 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37551:37551),(127.0.0.1/127.0.0.1:33635:33635)] 2024-11-12T18:29:44,710 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436169658 is not closed yet, will try archiving it next 
time 2024-11-12T18:29:44,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a92152dc3880e93df0e4ccb280879010:info, priority=-2147483648, current under compaction store size is 1 2024-11-12T18:29:44,710 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436142385 to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/oldWALs/9911683f163c%2C38061%2C1731436109919.1731436142385 2024-11-12T18:29:44,711 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 9911683f163c%2C38061%2C1731436109919:(num 1731436179669) roll requested 2024-11-12T18:29:44,711 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C38061%2C1731436109919.1731436184711 2024-11-12T18:29:44,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:29:44,714 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T18:29:44,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741843_1019 (size=438) 2024-11-12T18:29:44,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741843_1019 (size=438) 2024-11-12T18:29:44,715 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436153825 to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/oldWALs/9911683f163c%2C38061%2C1731436109919.1731436153825 2024-11-12T18:29:44,717 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436164641 to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/oldWALs/9911683f163c%2C38061%2C1731436109919.1731436164641 2024-11-12T18:29:44,718 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T18:29:44,720 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.HStore(1541): a92152dc3880e93df0e4ccb280879010/info is initiating minor compaction (all files) 2024-11-12T18:29:44,720 INFO [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a92152dc3880e93df0e4ccb280879010/info in TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. 
2024-11-12T18:29:44,721 INFO [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/6cc7fb9dfa9847c080249cdf01adccb6, hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/4ee6131218874618a1a056ac6ebd5905, hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/bcc2fd95365149bead0213a6f62d37cc] into tmpdir=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp, totalSize=36.6 K 2024-11-12T18:29:44,722 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6cc7fb9dfa9847c080249cdf01adccb6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731436122342 2024-11-12T18:29:44,722 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4ee6131218874618a1a056ac6ebd5905, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731436136375 2024-11-12T18:29:44,723 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] compactions.Compactor(225): Compacting bcc2fd95365149bead0213a6f62d37cc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731436151416 2024-11-12T18:29:44,730 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:44,730 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:44,730 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:44,731 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:44,731 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:44,731 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436179669 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436184711 2024-11-12T18:29:44,733 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37551:37551),(127.0.0.1/127.0.0.1:33635:33635)] 2024-11-12T18:29:44,734 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436169658 is not closed yet, will try archiving it next time 2024-11-12T18:29:44,734 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436179669 is not closed yet, will try archiving it next time 2024-11-12T18:29:44,734 INFO [regionserver/9911683f163c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C38061%2C1731436109919.1731436184734 
2024-11-12T18:29:44,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741845_1021 (size=93) 2024-11-12T18:29:44,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741845_1021 (size=93) 2024-11-12T18:29:44,738 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436169658 is not closed yet, will try archiving it next time 2024-11-12T18:29:44,738 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436179669 to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/oldWALs/9911683f163c%2C38061%2C1731436109919.1731436179669 2024-11-12T18:29:44,756 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:44,757 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:44,757 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:44,757 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:44,757 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:29:44,758 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436184711 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436184734 2024-11-12T18:29:44,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741846_1022 (size=1258) 2024-11-12T18:29:44,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741846_1022 (size=1258) 2024-11-12T18:29:44,762 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436169658 is not closed yet, will try archiving it next time 2024-11-12T18:29:44,765 INFO [RS:0;9911683f163c:38061-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a92152dc3880e93df0e4ccb280879010#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T18:29:44,766 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/56b7d0c0f1f64b4b81518c14eaf8fb18 is 1080, key is row0001/info:/1731436122342/Put/seqid=0 2024-11-12T18:29:44,780 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37551:37551),(127.0.0.1/127.0.0.1:33635:33635)] 2024-11-12T18:29:44,780 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436169658 is not closed yet, will try archiving it next time 2024-11-12T18:29:44,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741848_1024 (size=27710) 2024-11-12T18:29:44,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741848_1024 (size=27710) 2024-11-12T18:29:44,809 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/56b7d0c0f1f64b4b81518c14eaf8fb18 as hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/56b7d0c0f1f64b4b81518c14eaf8fb18 2024-11-12T18:29:44,838 INFO [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a92152dc3880e93df0e4ccb280879010/info of a92152dc3880e93df0e4ccb280879010 into 56b7d0c0f1f64b4b81518c14eaf8fb18(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
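Editor's note: rough bookkeeping for the compaction that just completed, using only the sizes reported in the log:

    input : 3 x 12,509 B = 37,527 B    ("3 files of size 37527")
    output: 1 x 27,710 B ~= 27.1 K     ("size=27710", "56b7d0c0...(size=27.1 K)")

All 21 cells (3 files x 7 entries of ~1 KB each) are carried over, since the flushes covered non-overlapping row ranges; the size reduction plausibly comes from collapsing three sets of per-file overhead (trailer, block index, bloom filter) into one, a reading of the numbers rather than something the log states explicitly.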
2024-11-12T18:29:44,838 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a92152dc3880e93df0e4ccb280879010: 2024-11-12T18:29:44,840 INFO [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010., storeName=a92152dc3880e93df0e4ccb280879010/info, priority=13, startTime=1731436184710; duration=0sec 2024-11-12T18:29:44,840 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-12T18:29:44,840 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:29:44,841 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/56b7d0c0f1f64b4b81518c14eaf8fb18 because midkey is the same as first or last row 2024-11-12T18:29:44,841 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-12T18:29:44,841 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:29:44,841 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/56b7d0c0f1f64b4b81518c14eaf8fb18 because midkey is the same as first or last row 2024-11-12T18:29:44,841 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-12T18:29:44,841 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:29:44,841 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/56b7d0c0f1f64b4b81518c14eaf8fb18 because midkey is the same as first or last row 2024-11-12T18:29:44,841 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:29:44,842 DEBUG [RS:0;9911683f163c:38061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a92152dc3880e93df0e4ccb280879010:info 2024-11-12T18:29:45,116 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/WALs/9911683f163c,38061,1731436109919/9911683f163c%2C38061%2C1731436109919.1731436169658 to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/oldWALs/9911683f163c%2C38061%2C1731436109919.1731436169658 
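Editor's note: the repeated pairs above — "Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K" followed by "cannot split ... because midkey is the same as first or last row" — record a two-step split decision: the size check passes (sizeToCheck is 16 K here), but the split is abandoned because the candidate split point, the midkey of the largest store file, equals that file's first or last row, so splitting there would effectively leave one daughter empty. A compact sketch of that check follows; the class and method names are invented for illustration, and only the midkey rule and the example sizes come from the log:

    import java.util.Arrays;

    /** Illustrative two-step split check mirroring the entries above. */
    public class SplitDecision {
      // totalStoreSize: sum of store file sizes (27.1 K above)
      // sizeToCheck:    configured split trigger size (16.0 K above)
      // firstKey/lastKey/midKey: keys of the largest store file
      public static boolean shouldSplit(long totalStoreSize, long sizeToCheck,
                                        byte[] firstKey, byte[] lastKey, byte[] midKey) {
        if (totalStoreSize <= sizeToCheck) {
          return false;  // region not big enough yet
        }
        // "cannot split ... because midkey is the same as first or last row":
        // a split at such a midkey would not divide the data meaningfully.
        if (Arrays.equals(midKey, firstKey) || Arrays.equals(midKey, lastKey)) {
          return false;
        }
        return true;
      }
    }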
2024-11-12T18:29:56,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38061 {}] regionserver.HRegion(8855): Flush requested on a92152dc3880e93df0e4ccb280879010 2024-11-12T18:29:56,764 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a92152dc3880e93df0e4ccb280879010 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-12T18:29:56,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/6bcf8cc6e6d04d329d856a0d7314565d is 1080, key is row0022/info:/1731436184736/Put/seqid=0 2024-11-12T18:29:56,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741849_1025 (size=12509) 2024-11-12T18:29:56,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741849_1025 (size=12509) 2024-11-12T18:29:56,785 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/6bcf8cc6e6d04d329d856a0d7314565d 2024-11-12T18:29:56,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/6bcf8cc6e6d04d329d856a0d7314565d as hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/6bcf8cc6e6d04d329d856a0d7314565d 2024-11-12T18:29:56,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/6bcf8cc6e6d04d329d856a0d7314565d, entries=7, sequenceid=42, filesize=12.2 K 2024-11-12T18:29:56,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a92152dc3880e93df0e4ccb280879010 in 52ms, sequenceid=42, compaction requested=false 2024-11-12T18:29:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a92152dc3880e93df0e4ccb280879010: 2024-11-12T18:29:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-12T18:29:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:29:56,816 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/56b7d0c0f1f64b4b81518c14eaf8fb18 because midkey is the same as first or last row 2024-11-12T18:29:58,201 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might 
because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-12T18:30:02,708 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a92152dc3880e93df0e4ccb280879010, had cached 0 bytes from a total of 40219 2024-11-12T18:30:04,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-12T18:30:04,776 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T18:30:04,776 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:30:04,781 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:04,782 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:04,782 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-12T18:30:04,782 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-12T18:30:04,782 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=626812342, stopped=false 2024-11-12T18:30:04,783 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=9911683f163c,41261,1731436109143 2024-11-12T18:30:04,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:30:04,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:30:04,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:04,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:04,786 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:30:04,786 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-12T18:30:04,786 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:30:04,786 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:04,787 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:30:04,787 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9911683f163c,38061,1731436109919' ***** 2024-11-12T18:30:04,787 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T18:30:04,787 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:30:04,787 INFO [RS:0;9911683f163c:38061 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T18:30:04,787 INFO [RS:0;9911683f163c:38061 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T18:30:04,787 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T18:30:04,788 INFO [RS:0;9911683f163c:38061 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T18:30:04,788 INFO [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(3091): Received CLOSE for a92152dc3880e93df0e4ccb280879010 2024-11-12T18:30:04,788 INFO [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(959): stopping server 9911683f163c,38061,1731436109919 2024-11-12T18:30:04,788 INFO [RS:0;9911683f163c:38061 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:30:04,789 INFO [RS:0;9911683f163c:38061 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;9911683f163c:38061. 2024-11-12T18:30:04,789 DEBUG [RS:0;9911683f163c:38061 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:30:04,789 DEBUG [RS:0;9911683f163c:38061 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:04,789 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a92152dc3880e93df0e4ccb280879010, disabling compactions & flushes 2024-11-12T18:30:04,789 INFO [RS:0;9911683f163c:38061 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T18:30:04,789 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. 2024-11-12T18:30:04,789 INFO [RS:0;9911683f163c:38061 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T18:30:04,789 INFO [RS:0;9911683f163c:38061 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T18:30:04,789 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. 2024-11-12T18:30:04,789 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. after waiting 0 ms 2024-11-12T18:30:04,789 INFO [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-12T18:30:04,789 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. 2024-11-12T18:30:04,789 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing a92152dc3880e93df0e4ccb280879010 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-12T18:30:04,790 INFO [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-12T18:30:04,790 DEBUG [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(1325): Online Regions={a92152dc3880e93df0e4ccb280879010=TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010., 1588230740=hbase:meta,,1.1588230740} 2024-11-12T18:30:04,790 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:30:04,790 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:30:04,790 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:30:04,790 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:30:04,790 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:30:04,790 DEBUG [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a92152dc3880e93df0e4ccb280879010 2024-11-12T18:30:04,790 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-12T18:30:04,796 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/c925cf9a28214863a26aaaff15d15267 is 1080, key is row0029/info:/1731436198766/Put/seqid=0 2024-11-12T18:30:04,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741850_1026 (size=8193) 2024-11-12T18:30:04,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741850_1026 (size=8193) 2024-11-12T18:30:04,804 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/c925cf9a28214863a26aaaff15d15267 2024-11-12T18:30:04,814 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/.tmp/info/c925cf9a28214863a26aaaff15d15267 as hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/c925cf9a28214863a26aaaff15d15267 2024-11-12T18:30:04,816 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/.tmp/info/595c54f75aa54327b652130d7d9a0b2c is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010./info:regioninfo/1731436112732/Put/seqid=0 2024-11-12T18:30:04,822 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/c925cf9a28214863a26aaaff15d15267, entries=3, sequenceid=48, filesize=8.0 K 2024-11-12T18:30:04,823 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for a92152dc3880e93df0e4ccb280879010 in 34ms, sequenceid=48, compaction requested=true 2024-11-12T18:30:04,824 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/6cc7fb9dfa9847c080249cdf01adccb6, hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/4ee6131218874618a1a056ac6ebd5905, 
hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/bcc2fd95365149bead0213a6f62d37cc] to archive 2024-11-12T18:30:04,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741851_1027 (size=7016) 2024-11-12T18:30:04,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741851_1027 (size=7016) 2024-11-12T18:30:04,827 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/.tmp/info/595c54f75aa54327b652130d7d9a0b2c 2024-11-12T18:30:04,828 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-12T18:30:04,831 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/6cc7fb9dfa9847c080249cdf01adccb6 to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/archive/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/6cc7fb9dfa9847c080249cdf01adccb6 2024-11-12T18:30:04,833 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/4ee6131218874618a1a056ac6ebd5905 to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/archive/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/4ee6131218874618a1a056ac6ebd5905 2024-11-12T18:30:04,835 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/bcc2fd95365149bead0213a6f62d37cc to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/archive/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/info/bcc2fd95365149bead0213a6f62d37cc 2024-11-12T18:30:04,848 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=9911683f163c:41261 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-12T18:30:04,853 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [6cc7fb9dfa9847c080249cdf01adccb6=12509, 4ee6131218874618a1a056ac6ebd5905=12509, bcc2fd95365149bead0213a6f62d37cc=12509] 2024-11-12T18:30:04,859 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/.tmp/ns/e79591452e884f18abeaa21a5dc5a520 is 43, key is default/ns:d/1731436111964/Put/seqid=0 2024-11-12T18:30:04,862 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/default/TestLogRolling-testSlowSyncLogRolling/a92152dc3880e93df0e4ccb280879010/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-12T18:30:04,865 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. 2024-11-12T18:30:04,865 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a92152dc3880e93df0e4ccb280879010: Waiting for close lock at 1731436204788Running coprocessor pre-close hooks at 1731436204789 (+1 ms)Disabling compacts and flushes for region at 1731436204789Disabling writes for close at 1731436204789Obtaining lock to block concurrent updates at 1731436204789Preparing flush snapshotting stores in a92152dc3880e93df0e4ccb280879010 at 1731436204789Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731436204790 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. at 1731436204791 (+1 ms)Flushing a92152dc3880e93df0e4ccb280879010/info: creating writer at 1731436204791Flushing a92152dc3880e93df0e4ccb280879010/info: appending metadata at 1731436204795 (+4 ms)Flushing a92152dc3880e93df0e4ccb280879010/info: closing flushed file at 1731436204796 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64056a37: reopening flushed file at 1731436204812 (+16 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for a92152dc3880e93df0e4ccb280879010 in 34ms, sequenceid=48, compaction requested=true at 1731436204823 (+11 ms)Writing region close event to WAL at 1731436204854 (+31 ms)Running coprocessor post-close hooks at 1731436204863 (+9 ms)Closed at 1731436204865 (+2 ms) 2024-11-12T18:30:04,866 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731436112265.a92152dc3880e93df0e4ccb280879010. 
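The HFileArchiver lines above move the compacted store files out of the region's data directory into the parallel archive tree before the store closes. The mapping between the two locations can be reproduced with plain Hadoop Path handling; the helper below is illustrative only (its name and string handling are assumptions, not HBase's HFileArchiver implementation).

import org.apache.hadoop.fs.Path;

public final class ArchivePathSketch {

  // Maps .../data/<namespace>/<table>/<region>/<family>/<hfile> to
  // .../archive/data/<namespace>/<table>/<region>/<family>/<hfile>,
  // mirroring the "Archived from FileableStoreFile" moves logged above.
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String full = storeFile.toUri().getPath();
    String relative = full.substring(full.indexOf("/data/") + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:36309/user/jenkins/test-data/"
        + "826b45c4-4209-1bfd-3dbb-f8467f3dd70e");
    Path storeFile = new Path(root,
        "data/default/TestLogRolling-testSlowSyncLogRolling/"
        + "a92152dc3880e93df0e4ccb280879010/info/6cc7fb9dfa9847c080249cdf01adccb6");
    // Prints the same archive location the HFileArchiver reports above.
    System.out.println(toArchivePath(root, storeFile));
  }
}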
2024-11-12T18:30:04,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741852_1028 (size=5153) 2024-11-12T18:30:04,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741852_1028 (size=5153) 2024-11-12T18:30:04,868 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/.tmp/ns/e79591452e884f18abeaa21a5dc5a520 2024-11-12T18:30:04,910 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/.tmp/table/80d31028881849829ade413f608b8182 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731436112750/Put/seqid=0 2024-11-12T18:30:04,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741853_1029 (size=5396) 2024-11-12T18:30:04,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741853_1029 (size=5396) 2024-11-12T18:30:04,918 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/.tmp/table/80d31028881849829ade413f608b8182 2024-11-12T18:30:04,929 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/.tmp/info/595c54f75aa54327b652130d7d9a0b2c as hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/info/595c54f75aa54327b652130d7d9a0b2c 2024-11-12T18:30:04,937 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/info/595c54f75aa54327b652130d7d9a0b2c, entries=10, sequenceid=11, filesize=6.9 K 2024-11-12T18:30:04,938 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/.tmp/ns/e79591452e884f18abeaa21a5dc5a520 as hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/ns/e79591452e884f18abeaa21a5dc5a520 2024-11-12T18:30:04,946 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/ns/e79591452e884f18abeaa21a5dc5a520, entries=2, sequenceid=11, filesize=5.0 K 2024-11-12T18:30:04,949 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/.tmp/table/80d31028881849829ade413f608b8182 as hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/table/80d31028881849829ade413f608b8182 2024-11-12T18:30:04,958 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/table/80d31028881849829ade413f608b8182, entries=2, sequenceid=11, filesize=5.3 K 2024-11-12T18:30:04,960 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 169ms, sequenceid=11, compaction requested=false 2024-11-12T18:30:04,966 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-12T18:30:04,967 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T18:30:04,967 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:30:04,967 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436204790Running coprocessor pre-close hooks at 1731436204790Disabling compacts and flushes for region at 1731436204790Disabling writes for close at 1731436204790Obtaining lock to block concurrent updates at 1731436204790Preparing flush snapshotting stores in 1588230740 at 1731436204790Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731436204791 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731436204792 (+1 ms)Flushing 1588230740/info: creating writer at 1731436204792Flushing 1588230740/info: appending metadata at 1731436204815 (+23 ms)Flushing 1588230740/info: closing flushed file at 1731436204815Flushing 1588230740/ns: creating writer at 1731436204836 (+21 ms)Flushing 1588230740/ns: appending metadata at 1731436204858 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1731436204858Flushing 1588230740/table: creating writer at 1731436204877 (+19 ms)Flushing 1588230740/table: appending metadata at 1731436204909 (+32 ms)Flushing 1588230740/table: closing flushed file at 1731436204909Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f4e6b13: reopening flushed file at 1731436204926 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bf05d43: reopening flushed file at 1731436204937 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78b2cd91: reopening flushed file at 1731436204946 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 169ms, sequenceid=11, compaction requested=false at 1731436204960 (+14 
ms)Writing region close event to WAL at 1731436204961 (+1 ms)Running coprocessor post-close hooks at 1731436204967 (+6 ms)Closed at 1731436204967 2024-11-12T18:30:04,968 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-12T18:30:04,990 INFO [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(976): stopping server 9911683f163c,38061,1731436109919; all regions closed. 2024-11-12T18:30:04,992 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:04,992 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:04,992 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:04,992 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:04,992 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:04,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741834_1010 (size=3066) 2024-11-12T18:30:04,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741834_1010 (size=3066) 2024-11-12T18:30:04,999 DEBUG [RS:0;9911683f163c:38061 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/oldWALs 2024-11-12T18:30:04,999 INFO [RS:0;9911683f163c:38061 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C38061%2C1731436109919.meta:.meta(num 1731436111794) 2024-11-12T18:30:04,999 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:05,000 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:05,000 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:05,000 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:05,000 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:05,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741847_1023 (size=12695) 2024-11-12T18:30:05,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741847_1023 (size=12695) 2024-11-12T18:30:05,006 DEBUG [RS:0;9911683f163c:38061 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/oldWALs 2024-11-12T18:30:05,006 INFO [RS:0;9911683f163c:38061 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C38061%2C1731436109919:(num 1731436184734) 2024-11-12T18:30:05,006 DEBUG [RS:0;9911683f163c:38061 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:05,006 INFO [RS:0;9911683f163c:38061 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:30:05,007 INFO [RS:0;9911683f163c:38061 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:30:05,007 INFO [RS:0;9911683f163c:38061 {}] hbase.ChoreService(370): Chore service for: regionserver/9911683f163c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-12T18:30:05,007 INFO [RS:0;9911683f163c:38061 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:30:05,007 INFO 
[regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T18:30:05,008 INFO [RS:0;9911683f163c:38061 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38061 2024-11-12T18:30:05,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:30:05,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9911683f163c,38061,1731436109919 2024-11-12T18:30:05,012 INFO [RS:0;9911683f163c:38061 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:30:05,013 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9911683f163c,38061,1731436109919] 2024-11-12T18:30:05,015 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9911683f163c,38061,1731436109919 already deleted, retry=false 2024-11-12T18:30:05,015 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9911683f163c,38061,1731436109919 expired; onlineServers=0 2024-11-12T18:30:05,016 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '9911683f163c,41261,1731436109143' ***** 2024-11-12T18:30:05,016 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T18:30:05,016 INFO [M:0;9911683f163c:41261 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:30:05,016 INFO [M:0;9911683f163c:41261 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:30:05,016 DEBUG [M:0;9911683f163c:41261 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T18:30:05,016 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
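The two "Chore service for: ... had [ScheduledChore name=...] on shutdown" reports in this stretch (the region server's above, the master's just below) come from ChoreService cancelling whatever ScheduledChore instances are still registered when the server stops. A self-contained sketch of that pair follows; the wiring and constructor arguments are assumptions based on the public HBase API, not code from HMaster or HRegionServer.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreShutdownSketch {
  public static void main(String[] args) throws InterruptedException {
    final boolean[] stopped = { false };
    Stoppable stopper = new Stoppable() {
      @Override public void stop(String why) { stopped[0] = true; }
      @Override public boolean isStopped() { return stopped[0]; }
    };

    ChoreService service = new ChoreService("sketch");
    // Periodic task; in the log these are e.g. ReplicationSourceStatistics,
    // CompactionThroughputTuner and FlushedSequenceIdFlusher.
    ScheduledChore stats = new ScheduledChore("StatisticsChore", stopper, 1000) {
      @Override protected void chore() {
        // periodic work goes here
      }
    };
    service.scheduleChore(stats);

    Thread.sleep(3000);
    // Server shutdown cancels the remaining chores, which ChoreService reports
    // as "... had [ScheduledChore name=..., period=..., unit=...]" on shutdown.
    service.shutdown();
  }
}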
2024-11-12T18:30:05,016 DEBUG [M:0;9911683f163c:41261 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T18:30:05,016 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436111036 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436111036,5,FailOnTimeoutGroup] 2024-11-12T18:30:05,016 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436111041 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436111041,5,FailOnTimeoutGroup] 2024-11-12T18:30:05,016 INFO [M:0;9911683f163c:41261 {}] hbase.ChoreService(370): Chore service for: master/9911683f163c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-12T18:30:05,017 INFO [M:0;9911683f163c:41261 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:30:05,017 DEBUG [M:0;9911683f163c:41261 {}] master.HMaster(1795): Stopping service threads 2024-11-12T18:30:05,017 INFO [M:0;9911683f163c:41261 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T18:30:05,017 INFO [M:0;9911683f163c:41261 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:30:05,017 INFO [M:0;9911683f163c:41261 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T18:30:05,018 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-12T18:30:05,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T18:30:05,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:05,018 DEBUG [M:0;9911683f163c:41261 {}] zookeeper.ZKUtil(347): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-12T18:30:05,019 WARN [M:0;9911683f163c:41261 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-12T18:30:05,019 INFO [M:0;9911683f163c:41261 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/.lastflushedseqids 2024-11-12T18:30:05,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741854_1030 (size=130) 2024-11-12T18:30:05,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741854_1030 (size=130) 2024-11-12T18:30:05,036 INFO [M:0;9911683f163c:41261 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-12T18:30:05,036 INFO [M:0;9911683f163c:41261 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-12T18:30:05,036 DEBUG [M:0;9911683f163c:41261 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:30:05,036 INFO [M:0;9911683f163c:41261 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:05,036 DEBUG [M:0;9911683f163c:41261 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:05,037 DEBUG [M:0;9911683f163c:41261 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:30:05,037 DEBUG [M:0;9911683f163c:41261 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:05,037 INFO [M:0;9911683f163c:41261 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-12T18:30:05,058 DEBUG [M:0;9911683f163c:41261 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f690a80fbab5475f861154fa524ea25a is 82, key is hbase:meta,,1/info:regioninfo/1731436111884/Put/seqid=0 2024-11-12T18:30:05,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741855_1031 (size=5672) 2024-11-12T18:30:05,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741855_1031 (size=5672) 2024-11-12T18:30:05,078 INFO [M:0;9911683f163c:41261 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f690a80fbab5475f861154fa524ea25a 2024-11-12T18:30:05,106 DEBUG [M:0;9911683f163c:41261 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/868d8809e3f74a298381061f23d7a553 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731436112758/Put/seqid=0 2024-11-12T18:30:05,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741856_1032 (size=6247) 2024-11-12T18:30:05,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741856_1032 (size=6247) 2024-11-12T18:30:05,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:30:05,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1003540648b0001, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:30:05,115 INFO [M:0;9911683f163c:41261 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/868d8809e3f74a298381061f23d7a553 
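In the proc-family flush above, the biggest cell's row key is printed as \x00\x00\x00\x00\x00\x00\x00\x04, i.e. eight bytes that read back as the long value 4; the master's region-based procedure store appears to key each procedure by its id serialized this way (an inference from the key shown, not verified against RegionProcedureStore). HBase's Bytes utility round-trips and escapes such keys exactly as the log prints them:

import org.apache.hadoop.hbase.util.Bytes;

public class ProcKeySketch {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes(4L);                 // 8-byte big-endian long
    System.out.println(Bytes.toStringBinary(row));  // \x00\x00\x00\x00\x00\x00\x00\x04
    System.out.println(Bytes.toLong(row));          // 4
  }
}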
2024-11-12T18:30:05,116 INFO [RS:0;9911683f163c:38061 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:30:05,117 INFO [RS:0;9911683f163c:38061 {}] regionserver.HRegionServer(1031): Exiting; stopping=9911683f163c,38061,1731436109919; zookeeper connection closed. 2024-11-12T18:30:05,117 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3a6afcb1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3a6afcb1 2024-11-12T18:30:05,118 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-12T18:30:05,125 INFO [M:0;9911683f163c:41261 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 868d8809e3f74a298381061f23d7a553 2024-11-12T18:30:05,145 DEBUG [M:0;9911683f163c:41261 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8b8c6728372e4871b57d52b8e36c42ce is 69, key is 9911683f163c,38061,1731436109919/rs:state/1731436111086/Put/seqid=0 2024-11-12T18:30:05,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741857_1033 (size=5156) 2024-11-12T18:30:05,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741857_1033 (size=5156) 2024-11-12T18:30:05,153 INFO [M:0;9911683f163c:41261 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8b8c6728372e4871b57d52b8e36c42ce 2024-11-12T18:30:05,184 DEBUG [M:0;9911683f163c:41261 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/92e8d51deb814f439085b8c7782fd4ce is 52, key is load_balancer_on/state:d/1731436112221/Put/seqid=0 2024-11-12T18:30:05,187 INFO [regionserver/9911683f163c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:30:05,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741858_1034 (size=5056) 2024-11-12T18:30:05,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741858_1034 (size=5056) 2024-11-12T18:30:05,197 INFO [M:0;9911683f163c:41261 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/92e8d51deb814f439085b8c7782fd4ce 2024-11-12T18:30:05,206 DEBUG [M:0;9911683f163c:41261 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f690a80fbab5475f861154fa524ea25a as 
hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f690a80fbab5475f861154fa524ea25a 2024-11-12T18:30:05,213 INFO [M:0;9911683f163c:41261 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f690a80fbab5475f861154fa524ea25a, entries=8, sequenceid=59, filesize=5.5 K 2024-11-12T18:30:05,215 DEBUG [M:0;9911683f163c:41261 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/868d8809e3f74a298381061f23d7a553 as hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/868d8809e3f74a298381061f23d7a553 2024-11-12T18:30:05,221 INFO [M:0;9911683f163c:41261 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 868d8809e3f74a298381061f23d7a553 2024-11-12T18:30:05,221 INFO [M:0;9911683f163c:41261 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/868d8809e3f74a298381061f23d7a553, entries=6, sequenceid=59, filesize=6.1 K 2024-11-12T18:30:05,222 DEBUG [M:0;9911683f163c:41261 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8b8c6728372e4871b57d52b8e36c42ce as hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8b8c6728372e4871b57d52b8e36c42ce 2024-11-12T18:30:05,229 INFO [M:0;9911683f163c:41261 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8b8c6728372e4871b57d52b8e36c42ce, entries=1, sequenceid=59, filesize=5.0 K 2024-11-12T18:30:05,230 DEBUG [M:0;9911683f163c:41261 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/92e8d51deb814f439085b8c7782fd4ce as hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/92e8d51deb814f439085b8c7782fd4ce 2024-11-12T18:30:05,237 INFO [M:0;9911683f163c:41261 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/92e8d51deb814f439085b8c7782fd4ce, entries=1, sequenceid=59, filesize=4.9 K 2024-11-12T18:30:05,239 INFO [M:0;9911683f163c:41261 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 202ms, sequenceid=59, compaction requested=false 2024-11-12T18:30:05,241 INFO [M:0;9911683f163c:41261 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
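Each "Committing .tmp/... as ..." line above is the second half of a two-phase flush: the flusher first writes the new HFile under the region's .tmp directory, then moves it into the column-family directory so readers pick it up in one step. A reduced sketch of that commit using the plain Hadoop FileSystem API; this is illustrative only, HRegionFileSystem performs additional validation around the move.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class CommitStoreFileSketch {

  // Moves a freshly flushed HFile from the region's .tmp directory into its
  // column-family directory, mirroring the "Committing .tmp/<family>/<file>
  // as <family>/<file>" step logged above.
  static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir)
      throws IOException {
    Path dst = new Path(familyDir, tmpFile.getName());
    if (!fs.exists(familyDir) && !fs.mkdirs(familyDir)) {
      throw new IOException("Could not create " + familyDir);
    }
    // Within a single HDFS filesystem this is a metadata-only rename, so the
    // committed store file becomes visible to readers atomically.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dst);
    }
    return dst;
  }
}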
2024-11-12T18:30:05,241 DEBUG [M:0;9911683f163c:41261 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436205036Disabling compacts and flushes for region at 1731436205036Disabling writes for close at 1731436205037 (+1 ms)Obtaining lock to block concurrent updates at 1731436205037Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731436205037Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731436205037Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731436205038 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731436205038Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731436205058 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731436205058Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731436205086 (+28 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731436205105 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731436205105Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731436205126 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731436205144 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731436205144Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731436205160 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731436205183 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731436205183Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f8ce948: reopening flushed file at 1731436205205 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44909538: reopening flushed file at 1731436205213 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@119e5cd3: reopening flushed file at 1731436205221 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d0645e7: reopening flushed file at 1731436205229 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 202ms, sequenceid=59, compaction requested=false at 1731436205239 (+10 ms)Writing region close event to WAL at 1731436205241 (+2 ms)Closed at 1731436205241 2024-11-12T18:30:05,242 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:05,242 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:05,242 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:05,242 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:05,242 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:05,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36157 is added to blk_1073741830_1006 (size=27973) 2024-11-12T18:30:05,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44269 is added to blk_1073741830_1006 (size=27973) 2024-11-12T18:30:05,246 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
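The region close journals logged in this section (for the user region, for hbase:meta, and for the master's local master:store region just above) are single concatenated strings in which each step carries an absolute timestamp and, when the step took measurable time, a "(+N ms)" delta. A throwaway parser for spotting where the close time went; the regular expression is tailored to the journal text quoted from the log and is not an HBase utility.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CloseJournalSketch {
  public static void main(String[] args) {
    // Fragment copied from the master store close journal above (abbreviated).
    String journal = "Waiting for close lock at 1731436205036"
        + "Disabling compacts and flushes for region at 1731436205036"
        + "Disabling writes for close at 1731436205037 (+1 ms)"
        + "Obtaining lock to block concurrent updates at 1731436205037";

    // Each step ends in "at <13-digit epoch millis>", optionally "(+N ms)".
    Pattern step = Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");
    Matcher m = step.matcher(journal);
    while (m.find()) {
      String delta = m.group(3) == null ? "0" : m.group(3);
      System.out.printf("%-55s +%s ms%n", m.group(1).trim(), delta);
    }
  }
}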
2024-11-12T18:30:05,246 INFO [M:0;9911683f163c:41261 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-12T18:30:05,246 INFO [M:0;9911683f163c:41261 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41261 2024-11-12T18:30:05,247 INFO [M:0;9911683f163c:41261 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:30:05,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:30:05,349 INFO [M:0;9911683f163c:41261 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:30:05,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41261-0x1003540648b0000, quorum=127.0.0.1:61753, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:30:05,354 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@12a6bd7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:05,356 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6b07a016{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:30:05,356 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:30:05,357 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4450986c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:30:05,357 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f1be96f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/hadoop.log.dir/,STOPPED} 2024-11-12T18:30:05,360 WARN [BP-997071308-172.17.0.3-1731436106189 heartbeating to localhost/127.0.0.1:36309 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:30:05,360 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
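From here the teardown leaves HBase proper: the Jetty contexts and ServerConnectors being stopped above, and the DataNode block-pool services ending below, belong to the mini HDFS cluster that backed the test, followed later by the mini ZooKeeper cluster. Driving an equivalent mini DFS cluster directly looks roughly like this (configuration simplified; in these tests HBaseTestingUtil manages it for you):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsLifecycleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)   // the log shows two DataNodes (data1..data4 dirs)
        .build();
    try {
      // a test body would use dfs.getFileSystem() here
    } finally {
      // Stops the DataNode BPServiceActors and the NameNode/DataNode web UIs,
      // which is what the "Ending block pool service" and "Stopped
      // ServerConnector" lines in the log correspond to.
      dfs.shutdown();
    }
  }
}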
2024-11-12T18:30:05,360 WARN [BP-997071308-172.17.0.3-1731436106189 heartbeating to localhost/127.0.0.1:36309 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-997071308-172.17.0.3-1731436106189 (Datanode Uuid d65c5f1a-bcab-4896-86d3-85e6c7d57584) service to localhost/127.0.0.1:36309 2024-11-12T18:30:05,360 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:30:05,362 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/cluster_78433112-c5ed-e525-2622-27a7897f093a/data/data3/current/BP-997071308-172.17.0.3-1731436106189 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:05,362 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/cluster_78433112-c5ed-e525-2622-27a7897f093a/data/data4/current/BP-997071308-172.17.0.3-1731436106189 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:05,363 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:30:05,370 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76a77789{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:05,370 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@32415a66{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:30:05,370 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:30:05,370 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@489f2f9e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:30:05,370 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27d2dfff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/hadoop.log.dir/,STOPPED} 2024-11-12T18:30:05,372 WARN [BP-997071308-172.17.0.3-1731436106189 heartbeating to localhost/127.0.0.1:36309 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:30:05,372 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
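The hbase.ResourceChecker report further below ("after: ... Thread=77 (was 12) Potentially hanging thread: ...") compares the number of live threads after the test with a snapshot taken before it and dumps the stacks of the survivors, which is how the lingering sync and netty event-loop threads become visible. A plain-JDK approximation of that bookkeeping; the real ResourceChecker tracks several resource types and formats stack frames differently.

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class ThreadDiffSketch {
  private Set<Thread> before;

  public void snapshotBefore() {
    before = new HashSet<>(Thread.getAllStackTraces().keySet());
  }

  public void reportAfter() {
    Map<Thread, StackTraceElement[]> after = Thread.getAllStackTraces();
    System.out.printf("Thread=%d (was %d)%n", after.size(), before.size());
    for (Map.Entry<Thread, StackTraceElement[]> e : after.entrySet()) {
      if (before.contains(e.getKey())) {
        continue;                      // already running before the test
      }
      System.out.println("Potentially hanging thread: " + e.getKey().getName());
      for (StackTraceElement frame : e.getValue()) {
        System.out.println("    " + frame);
      }
    }
  }
}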
2024-11-12T18:30:05,372 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:30:05,372 WARN [BP-997071308-172.17.0.3-1731436106189 heartbeating to localhost/127.0.0.1:36309 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-997071308-172.17.0.3-1731436106189 (Datanode Uuid e993e43f-8a71-4985-8469-fd290cb9df97) service to localhost/127.0.0.1:36309 2024-11-12T18:30:05,373 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/cluster_78433112-c5ed-e525-2622-27a7897f093a/data/data1/current/BP-997071308-172.17.0.3-1731436106189 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:05,374 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/cluster_78433112-c5ed-e525-2622-27a7897f093a/data/data2/current/BP-997071308-172.17.0.3-1731436106189 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:05,374 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:30:05,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@439445db{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:30:05,383 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d712b6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:30:05,383 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:30:05,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@297967b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:30:05,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d3e2ff3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/hadoop.log.dir/,STOPPED} 2024-11-12T18:30:05,392 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-12T18:30:05,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-12T18:30:05,434 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@375b938a java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36309 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/9911683f163c:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/9911683f163c:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36309 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/9911683f163c:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:36309 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:36309 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:36309 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36309 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:36309 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36309 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=154 (was 213), ProcessCount=11 (was 11), AvailableMemoryMB=6577 (was 6866) 2024-11-12T18:30:05,442 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=154, ProcessCount=11, AvailableMemoryMB=6575 2024-11-12T18:30:05,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-12T18:30:05,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/hadoop.log.dir so I do NOT create it in target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d 2024-11-12T18:30:05,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82062a0e-a99d-4fca-d98e-85c73da98403/hadoop.tmp.dir so I do NOT create it in target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d 2024-11-12T18:30:05,443 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/cluster_da7640f0-d730-e29e-4541-fbddbcd213df, deleteOnExit=true 2024-11-12T18:30:05,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-12T18:30:05,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/test.cache.data in system properties and HBase conf 2024-11-12T18:30:05,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/hadoop.tmp.dir in system properties and HBase conf 2024-11-12T18:30:05,444 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/hadoop.log.dir in system properties and HBase conf 2024-11-12T18:30:05,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-12T18:30:05,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-12T18:30:05,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-12T18:30:05,444 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-12T18:30:05,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:30:05,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:30:05,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-12T18:30:05,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:30:05,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-12T18:30:05,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-12T18:30:05,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:30:05,446 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:30:05,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-12T18:30:05,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/nfs.dump.dir in system properties and HBase conf 2024-11-12T18:30:05,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/java.io.tmpdir in system properties and HBase conf 2024-11-12T18:30:05,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:30:05,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-12T18:30:05,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-12T18:30:05,470 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-12T18:30:05,560 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:05,569 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:30:05,571 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:30:05,571 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:30:05,571 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:30:05,572 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:05,572 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58d2c7f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:30:05,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47276af2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:30:05,706 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@ec92eb9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/java.io.tmpdir/jetty-localhost-33281-hadoop-hdfs-3_4_1-tests_jar-_-any-10637342541358720279/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:30:05,707 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@338bb31f{HTTP/1.1, (http/1.1)}{localhost:33281} 2024-11-12T18:30:05,707 INFO [Time-limited test {}] server.Server(415): Started @101469ms 2024-11-12T18:30:05,721 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-12T18:30:05,798 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:05,802 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:30:05,802 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:30:05,802 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:30:05,802 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T18:30:05,803 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f2d2177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:30:05,804 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f8081e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:30:05,927 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7853b6ca{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/java.io.tmpdir/jetty-localhost-33935-hadoop-hdfs-3_4_1-tests_jar-_-any-10523046694104529832/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:05,928 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a9173f6{HTTP/1.1, (http/1.1)}{localhost:33935} 2024-11-12T18:30:05,928 INFO [Time-limited test {}] server.Server(415): Started @101690ms 2024-11-12T18:30:05,931 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:30:06,033 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:06,039 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:30:06,053 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:30:06,053 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:30:06,053 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:30:06,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4038b61b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:30:06,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7966340b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:30:06,121 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/cluster_da7640f0-d730-e29e-4541-fbddbcd213df/data/data1/current/BP-1733240676-172.17.0.3-1731436205494/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:06,122 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/cluster_da7640f0-d730-e29e-4541-fbddbcd213df/data/data2/current/BP-1733240676-172.17.0.3-1731436205494/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:06,165 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T18:30:06,169 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfec7c026a03b7889 with lease ID 0x7281ede58ba4aceb: Processing first storage report for DS-da035549-ebb9-4783-bbe1-68e985c0f931 from datanode DatanodeRegistration(127.0.0.1:44353, datanodeUuid=e1f6d8ff-f752-4bc5-bcb4-8ffcd0068a93, infoPort=44391, infoSecurePort=0, ipcPort=44947, storageInfo=lv=-57;cid=testClusterID;nsid=16590902;c=1731436205494) 2024-11-12T18:30:06,170 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfec7c026a03b7889 with lease ID 0x7281ede58ba4aceb: from storage DS-da035549-ebb9-4783-bbe1-68e985c0f931 node DatanodeRegistration(127.0.0.1:44353, datanodeUuid=e1f6d8ff-f752-4bc5-bcb4-8ffcd0068a93, infoPort=44391, infoSecurePort=0, ipcPort=44947, storageInfo=lv=-57;cid=testClusterID;nsid=16590902;c=1731436205494), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:06,170 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfec7c026a03b7889 with lease ID 0x7281ede58ba4aceb: Processing first storage report for DS-77f41145-dd64-4166-af9a-d64e09cbd89f from datanode DatanodeRegistration(127.0.0.1:44353, datanodeUuid=e1f6d8ff-f752-4bc5-bcb4-8ffcd0068a93, infoPort=44391, infoSecurePort=0, ipcPort=44947, storageInfo=lv=-57;cid=testClusterID;nsid=16590902;c=1731436205494) 2024-11-12T18:30:06,170 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfec7c026a03b7889 with lease ID 0x7281ede58ba4aceb: from storage DS-77f41145-dd64-4166-af9a-d64e09cbd89f node DatanodeRegistration(127.0.0.1:44353, datanodeUuid=e1f6d8ff-f752-4bc5-bcb4-8ffcd0068a93, infoPort=44391, infoSecurePort=0, ipcPort=44947, storageInfo=lv=-57;cid=testClusterID;nsid=16590902;c=1731436205494), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-12T18:30:06,205 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c0d07b6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/java.io.tmpdir/jetty-localhost-46759-hadoop-hdfs-3_4_1-tests_jar-_-any-17885180958868716541/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:06,206 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69c60fc1{HTTP/1.1, (http/1.1)}{localhost:46759} 2024-11-12T18:30:06,206 INFO [Time-limited test {}] server.Server(415): Started @101968ms 2024-11-12T18:30:06,209 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
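The records above come from HBaseTestingUtil bringing up the mini HDFS cluster for the test run: per-test dfs.* and hbase.* properties rewritten into the test-data directory, Jetty servers for the namenode and datanode web UIs, and the first datanode block reports. As a rough, hypothetical sketch of how a test typically drives this setup (class and method names are from the public HBase testing API as I understand it, not code taken from this run):

    // Minimal sketch, assuming the HBase 3.x HBaseTestingUtil keeps the
    // startMiniCluster()/shutdownMiniCluster() API of the older HBaseTestingUtility.
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();   // creates the per-test data directory and conf
        util.startMiniCluster();                          // starts mini DFS, ZooKeeper, master and a regionserver
        try {
          // ... exercise the cluster under test here ...
        } finally {
          util.shutdownMiniCluster();                     // tears the cluster down and removes the test data dir
        }
      }
    }

The property rewrites, Jetty startups and block reports logged above roughly correspond to what startMiniCluster() does before the HBase daemons themselves come up.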
2024-11-12T18:30:06,349 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/cluster_da7640f0-d730-e29e-4541-fbddbcd213df/data/data3/current/BP-1733240676-172.17.0.3-1731436205494/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:06,353 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/cluster_da7640f0-d730-e29e-4541-fbddbcd213df/data/data4/current/BP-1733240676-172.17.0.3-1731436205494/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:06,413 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T18:30:06,421 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2f71118680b03b84 with lease ID 0x7281ede58ba4acec: Processing first storage report for DS-2b34d667-4032-4011-abc9-1e2cae752bc3 from datanode DatanodeRegistration(127.0.0.1:42129, datanodeUuid=30d96a07-adf6-4f71-baa8-38b2fd24a825, infoPort=32781, infoSecurePort=0, ipcPort=35945, storageInfo=lv=-57;cid=testClusterID;nsid=16590902;c=1731436205494) 2024-11-12T18:30:06,421 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2f71118680b03b84 with lease ID 0x7281ede58ba4acec: from storage DS-2b34d667-4032-4011-abc9-1e2cae752bc3 node DatanodeRegistration(127.0.0.1:42129, datanodeUuid=30d96a07-adf6-4f71-baa8-38b2fd24a825, infoPort=32781, infoSecurePort=0, ipcPort=35945, storageInfo=lv=-57;cid=testClusterID;nsid=16590902;c=1731436205494), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:06,421 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2f71118680b03b84 with lease ID 0x7281ede58ba4acec: Processing first storage report for DS-1298a96b-6269-4da2-a27e-f03a7b3f10ca from datanode DatanodeRegistration(127.0.0.1:42129, datanodeUuid=30d96a07-adf6-4f71-baa8-38b2fd24a825, infoPort=32781, infoSecurePort=0, ipcPort=35945, storageInfo=lv=-57;cid=testClusterID;nsid=16590902;c=1731436205494) 2024-11-12T18:30:06,421 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2f71118680b03b84 with lease ID 0x7281ede58ba4acec: from storage DS-1298a96b-6269-4da2-a27e-f03a7b3f10ca node DatanodeRegistration(127.0.0.1:42129, datanodeUuid=30d96a07-adf6-4f71-baa8-38b2fd24a825, infoPort=32781, infoSecurePort=0, ipcPort=35945, storageInfo=lv=-57;cid=testClusterID;nsid=16590902;c=1731436205494), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:06,457 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d 2024-11-12T18:30:06,469 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/cluster_da7640f0-d730-e29e-4541-fbddbcd213df/zookeeper_0, clientPort=53806, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/cluster_da7640f0-d730-e29e-4541-fbddbcd213df/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/cluster_da7640f0-d730-e29e-4541-fbddbcd213df/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-12T18:30:06,475 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53806 2024-11-12T18:30:06,475 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:06,477 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:06,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:30:06,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:30:06,510 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0 with version=8 2024-11-12T18:30:06,511 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/hbase-staging 2024-11-12T18:30:06,514 INFO [Time-limited test {}] client.ConnectionUtils(128): master/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:30:06,514 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:06,514 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:06,514 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:30:06,514 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:06,515 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:30:06,515 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-12T18:30:06,515 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:30:06,516 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34287 2024-11-12T18:30:06,518 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34287 connecting to ZooKeeper ensemble=127.0.0.1:53806 2024-11-12T18:30:06,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:342870x0, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:30:06,530 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34287-0x1003541e41e0000 connected 2024-11-12T18:30:06,563 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:06,566 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:06,570 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:30:06,570 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0, hbase.cluster.distributed=false 2024-11-12T18:30:06,573 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:30:06,580 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34287 2024-11-12T18:30:06,580 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34287 2024-11-12T18:30:06,581 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34287 2024-11-12T18:30:06,585 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34287 2024-11-12T18:30:06,588 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34287 2024-11-12T18:30:06,612 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:30:06,612 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:06,612 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:06,612 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:30:06,612 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:06,613 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:30:06,613 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T18:30:06,613 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:30:06,614 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42449 2024-11-12T18:30:06,616 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42449 connecting to ZooKeeper ensemble=127.0.0.1:53806 2024-11-12T18:30:06,617 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:06,621 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:06,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:424490x0, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:30:06,637 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:424490x0, quorum=127.0.0.1:53806, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:30:06,638 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T18:30:06,641 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42449-0x1003541e41e0001 connected 2024-11-12T18:30:06,649 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T18:30:06,650 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T18:30:06,652 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:30:06,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42449 2024-11-12T18:30:06,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42449 2024-11-12T18:30:06,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42449 2024-11-12T18:30:06,665 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42449 2024-11-12T18:30:06,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42449 2024-11-12T18:30:06,688 
DEBUG [M:0;9911683f163c:34287 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9911683f163c:34287 2024-11-12T18:30:06,689 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/9911683f163c,34287,1731436206514 2024-11-12T18:30:06,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:30:06,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:30:06,705 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9911683f163c,34287,1731436206514 2024-11-12T18:30:06,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T18:30:06,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:06,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:06,709 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T18:30:06,710 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9911683f163c,34287,1731436206514 from backup master directory 2024-11-12T18:30:06,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9911683f163c,34287,1731436206514 2024-11-12T18:30:06,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:30:06,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:30:06,712 WARN [master/9911683f163c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-12T18:30:06,712 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9911683f163c,34287,1731436206514 2024-11-12T18:30:06,740 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/hbase.id] with ID: 2b68faae-0786-42f5-aa84-89701c20ce52 2024-11-12T18:30:06,741 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/.tmp/hbase.id 2024-11-12T18:30:06,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:30:06,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:30:06,765 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/.tmp/hbase.id]:[hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/hbase.id] 2024-11-12T18:30:06,787 INFO [master/9911683f163c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:06,787 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-12T18:30:06,797 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 10ms. 
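At this point the master has registered as active (9911683f163c,34287,1731436206514) and written the cluster ID file. A hedged, illustrative fragment of how test code could confirm that state through the public Admin API, assuming the 3.x HBaseTestingUtil still exposes getAdmin() as the older HBaseTestingUtility did (the class and method below are made up for illustration):

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.client.Admin;

    public class ClusterCheckSketch {
      // Assumed helper: "util" is the running HBaseTestingUtil from the earlier sketch.
      static void printClusterSummary(HBaseTestingUtil util) throws Exception {
        Admin admin = util.getAdmin();                      // shared Admin owned by the testing util; not closed here
        ClusterMetrics metrics = admin.getClusterMetrics(); // same view the master reports in the log
        System.out.println("cluster id:    " + metrics.getClusterId());   // e.g. the hbase.id value written above
        System.out.println("active master: " + metrics.getMasterName());  // e.g. 9911683f163c,34287,1731436206514
        System.out.println("live servers:  " + metrics.getLiveServerMetrics().size());
      }
    }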
2024-11-12T18:30:06,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:06,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:06,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:30:06,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:30:06,832 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:30:06,833 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T18:30:06,833 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:30:06,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:30:06,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:30:06,845 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store 2024-11-12T18:30:06,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:30:06,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:30:06,857 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:06,857 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:30:06,857 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:06,857 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:06,857 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:30:06,857 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:06,857 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
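The two long records above print the descriptor of the internal 'master:store' region (column families info, proc, rs and state) as MasterRegion creates it. Purely as an illustration of what those printed attributes mean, and not the code HBase actually runs internally, a roughly equivalent descriptor could be assembled with the public builder API (attribute values copied from the log; the class name below is made up):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            // 'info' family as printed: 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc', 'rs' and 'state' keep the defaults shown: 1 version, ROW bloom, 64 KB blocks
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc")).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs")).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state")).build())
            .build();
      }
    }

Only the 'info' family overrides the defaults; proc, rs and state keep the stock 1-version, ROW-bloom, 64 KB-block settings, which matches the descriptor dump above.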
2024-11-12T18:30:06,858 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436206857Disabling compacts and flushes for region at 1731436206857Disabling writes for close at 1731436206857Writing region close event to WAL at 1731436206857Closed at 1731436206857 2024-11-12T18:30:06,859 WARN [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/.initializing 2024-11-12T18:30:06,859 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/WALs/9911683f163c,34287,1731436206514 2024-11-12T18:30:06,863 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C34287%2C1731436206514, suffix=, logDir=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/WALs/9911683f163c,34287,1731436206514, archiveDir=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/oldWALs, maxLogs=10 2024-11-12T18:30:06,864 INFO [master/9911683f163c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C34287%2C1731436206514.1731436206864 2024-11-12T18:30:06,870 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/WALs/9911683f163c,34287,1731436206514/9911683f163c%2C34287%2C1731436206514.1731436206864 2024-11-12T18:30:06,871 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32781:32781),(127.0.0.1/127.0.0.1:44391:44391)] 2024-11-12T18:30:06,872 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:30:06,872 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:06,872 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:06,872 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:06,877 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:06,879 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T18:30:06,879 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:06,879 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:06,879 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:06,881 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T18:30:06,881 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:06,882 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:30:06,882 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:06,884 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T18:30:06,885 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:06,885 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:30:06,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:06,887 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T18:30:06,887 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:06,887 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:30:06,888 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:06,889 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:06,889 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:06,891 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:06,891 DEBUG [master/9911683f163c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:06,891 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T18:30:06,893 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:06,895 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:30:06,895 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748609, jitterRate=-0.04809559881687164}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T18:30:06,897 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731436206873Initializing all the Stores at 1731436206874 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436206874Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436206876 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436206876Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436206876Cleaning up temporary data from old regions at 1731436206891 (+15 ms)Region opened successfully at 1731436206897 (+6 ms) 2024-11-12T18:30:06,898 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T18:30:06,903 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@455f4ade, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:30:06,905 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-12T18:30:06,905 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T18:30:06,905 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T18:30:06,905 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T18:30:06,906 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-12T18:30:06,906 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-12T18:30:06,906 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T18:30:06,909 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-12T18:30:06,911 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T18:30:06,912 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-12T18:30:06,913 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T18:30:06,914 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T18:30:06,915 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-12T18:30:06,916 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T18:30:06,917 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T18:30:06,919 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-12T18:30:06,920 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T18:30:06,921 DEBUG 
[master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T18:30:06,924 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T18:30:06,936 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T18:30:06,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:30:06,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:30:06,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:06,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:06,953 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=9911683f163c,34287,1731436206514, sessionid=0x1003541e41e0000, setting cluster-up flag (Was=false) 2024-11-12T18:30:06,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:06,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:06,971 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T18:30:06,972 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,34287,1731436206514 2024-11-12T18:30:06,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:06,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:06,981 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T18:30:06,982 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,34287,1731436206514 2024-11-12T18:30:06,984 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-12T18:30:06,986 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-12T18:30:06,986 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-12T18:30:06,986 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-12T18:30:06,986 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9911683f163c,34287,1731436206514 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T18:30:06,988 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:30:06,988 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:30:06,988 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:30:06,988 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:30:06,988 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9911683f163c:0, corePoolSize=10, maxPoolSize=10 2024-11-12T18:30:06,988 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:06,988 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:30:06,988 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9911683f163c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-12T18:30:06,989 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731436236989 2024-11-12T18:30:06,989 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T18:30:06,989 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T18:30:06,989 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T18:30:06,989 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T18:30:06,989 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T18:30:06,989 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T18:30:06,990 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:06,990 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T18:30:06,990 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T18:30:06,990 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T18:30:06,990 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:30:06,990 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-12T18:30:06,991 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T18:30:06,991 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T18:30:06,991 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436206991,5,FailOnTimeoutGroup] 2024-11-12T18:30:06,991 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436206991,5,FailOnTimeoutGroup] 2024-11-12T18:30:06,991 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:06,991 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-12T18:30:06,991 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:06,992 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:06,992 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:06,992 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T18:30:07,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741831_1007 (size=1321) 2024-11-12T18:30:07,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741831_1007 (size=1321) 2024-11-12T18:30:07,002 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-12T18:30:07,002 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0 2024-11-12T18:30:07,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:30:07,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:30:07,013 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:07,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:30:07,016 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:30:07,016 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:07,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:07,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:30:07,019 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:30:07,019 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:07,020 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:07,020 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:30:07,022 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:30:07,022 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:07,022 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:07,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:30:07,024 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:30:07,025 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:07,025 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:07,025 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:30:07,026 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/data/hbase/meta/1588230740 2024-11-12T18:30:07,027 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/data/hbase/meta/1588230740 2024-11-12T18:30:07,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:30:07,029 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:30:07,029 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-12T18:30:07,031 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:30:07,034 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:30:07,035 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740208, jitterRate=-0.058778077363967896}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T18:30:07,037 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731436207013Initializing all the Stores at 1731436207014 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436207014Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436207014Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436207014Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436207014Cleaning up temporary data from old regions at 1731436207029 (+15 ms)Region opened successfully at 1731436207037 (+8 ms) 2024-11-12T18:30:07,037 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:30:07,037 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:30:07,037 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:30:07,037 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:30:07,037 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:30:07,038 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:30:07,038 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436207037Disabling compacts and flushes for region at 1731436207037Disabling writes for close at 1731436207037Writing region 
close event to WAL at 1731436207038 (+1 ms)Closed at 1731436207038 2024-11-12T18:30:07,040 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:30:07,040 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-12T18:30:07,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-12T18:30:07,043 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:30:07,044 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-12T18:30:07,070 INFO [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer(746): ClusterId : 2b68faae-0786-42f5-aa84-89701c20ce52 2024-11-12T18:30:07,070 DEBUG [RS:0;9911683f163c:42449 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T18:30:07,073 DEBUG [RS:0;9911683f163c:42449 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T18:30:07,073 DEBUG [RS:0;9911683f163c:42449 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T18:30:07,076 DEBUG [RS:0;9911683f163c:42449 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T18:30:07,076 DEBUG [RS:0;9911683f163c:42449 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f4e1f25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:30:07,090 DEBUG [RS:0;9911683f163c:42449 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9911683f163c:42449 2024-11-12T18:30:07,090 INFO [RS:0;9911683f163c:42449 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T18:30:07,090 INFO [RS:0;9911683f163c:42449 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T18:30:07,090 DEBUG [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-12T18:30:07,091 INFO [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,34287,1731436206514 with port=42449, startcode=1731436206612 2024-11-12T18:30:07,091 DEBUG [RS:0;9911683f163c:42449 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T18:30:07,094 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49539, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T18:30:07,095 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34287 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9911683f163c,42449,1731436206612 2024-11-12T18:30:07,095 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34287 {}] master.ServerManager(517): Registering regionserver=9911683f163c,42449,1731436206612 2024-11-12T18:30:07,097 DEBUG [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0 2024-11-12T18:30:07,098 DEBUG [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40907 2024-11-12T18:30:07,098 DEBUG [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T18:30:07,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:30:07,101 DEBUG [RS:0;9911683f163c:42449 {}] zookeeper.ZKUtil(111): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9911683f163c,42449,1731436206612 2024-11-12T18:30:07,101 WARN [RS:0;9911683f163c:42449 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T18:30:07,101 INFO [RS:0;9911683f163c:42449 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:30:07,101 DEBUG [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/WALs/9911683f163c,42449,1731436206612 2024-11-12T18:30:07,103 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9911683f163c,42449,1731436206612] 2024-11-12T18:30:07,108 INFO [RS:0;9911683f163c:42449 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T18:30:07,113 INFO [RS:0;9911683f163c:42449 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T18:30:07,115 INFO [RS:0;9911683f163c:42449 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T18:30:07,115 INFO [RS:0;9911683f163c:42449 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-12T18:30:07,116 INFO [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T18:30:07,118 INFO [RS:0;9911683f163c:42449 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T18:30:07,118 INFO [RS:0;9911683f163c:42449 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:07,118 DEBUG [RS:0;9911683f163c:42449 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:07,118 DEBUG [RS:0;9911683f163c:42449 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:07,118 DEBUG [RS:0;9911683f163c:42449 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:07,119 DEBUG [RS:0;9911683f163c:42449 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:07,119 DEBUG [RS:0;9911683f163c:42449 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:07,119 DEBUG [RS:0;9911683f163c:42449 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:30:07,119 DEBUG [RS:0;9911683f163c:42449 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:07,119 DEBUG [RS:0;9911683f163c:42449 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:07,119 DEBUG [RS:0;9911683f163c:42449 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:07,119 DEBUG [RS:0;9911683f163c:42449 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:07,119 DEBUG [RS:0;9911683f163c:42449 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:07,119 DEBUG [RS:0;9911683f163c:42449 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:07,119 DEBUG [RS:0;9911683f163c:42449 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:30:07,120 DEBUG [RS:0;9911683f163c:42449 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:30:07,123 INFO [RS:0;9911683f163c:42449 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-12T18:30:07,123 INFO [RS:0;9911683f163c:42449 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:07,123 INFO [RS:0;9911683f163c:42449 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:07,123 INFO [RS:0;9911683f163c:42449 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:07,123 INFO [RS:0;9911683f163c:42449 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:07,124 INFO [RS:0;9911683f163c:42449 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,42449,1731436206612-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:30:07,141 INFO [RS:0;9911683f163c:42449 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T18:30:07,141 INFO [RS:0;9911683f163c:42449 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,42449,1731436206612-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:07,141 INFO [RS:0;9911683f163c:42449 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:07,141 INFO [RS:0;9911683f163c:42449 {}] regionserver.Replication(171): 9911683f163c,42449,1731436206612 started 2024-11-12T18:30:07,157 INFO [RS:0;9911683f163c:42449 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:07,158 INFO [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer(1482): Serving as 9911683f163c,42449,1731436206612, RpcServer on 9911683f163c/172.17.0.3:42449, sessionid=0x1003541e41e0001 2024-11-12T18:30:07,158 DEBUG [RS:0;9911683f163c:42449 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T18:30:07,158 DEBUG [RS:0;9911683f163c:42449 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9911683f163c,42449,1731436206612 2024-11-12T18:30:07,158 DEBUG [RS:0;9911683f163c:42449 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,42449,1731436206612' 2024-11-12T18:30:07,158 DEBUG [RS:0;9911683f163c:42449 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T18:30:07,159 DEBUG [RS:0;9911683f163c:42449 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T18:30:07,159 DEBUG [RS:0;9911683f163c:42449 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T18:30:07,159 DEBUG [RS:0;9911683f163c:42449 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T18:30:07,159 DEBUG [RS:0;9911683f163c:42449 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9911683f163c,42449,1731436206612 2024-11-12T18:30:07,159 DEBUG [RS:0;9911683f163c:42449 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,42449,1731436206612' 2024-11-12T18:30:07,159 DEBUG [RS:0;9911683f163c:42449 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T18:30:07,160 DEBUG 
[RS:0;9911683f163c:42449 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T18:30:07,160 DEBUG [RS:0;9911683f163c:42449 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T18:30:07,160 INFO [RS:0;9911683f163c:42449 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T18:30:07,161 INFO [RS:0;9911683f163c:42449 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T18:30:07,194 WARN [9911683f163c:34287 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-12T18:30:07,263 INFO [RS:0;9911683f163c:42449 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C42449%2C1731436206612, suffix=, logDir=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/WALs/9911683f163c,42449,1731436206612, archiveDir=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/oldWALs, maxLogs=32 2024-11-12T18:30:07,265 INFO [RS:0;9911683f163c:42449 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C42449%2C1731436206612.1731436207265 2024-11-12T18:30:07,273 INFO [RS:0;9911683f163c:42449 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/WALs/9911683f163c,42449,1731436206612/9911683f163c%2C42449%2C1731436206612.1731436207265 2024-11-12T18:30:07,274 DEBUG [RS:0;9911683f163c:42449 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32781:32781),(127.0.0.1/127.0.0.1:44391:44391)] 2024-11-12T18:30:07,445 DEBUG [9911683f163c:34287 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-12T18:30:07,445 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9911683f163c,42449,1731436206612 2024-11-12T18:30:07,448 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,42449,1731436206612, state=OPENING 2024-11-12T18:30:07,450 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-12T18:30:07,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:07,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:07,452 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:30:07,452 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:30:07,452 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:30:07,452 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,42449,1731436206612}] 2024-11-12T18:30:07,607 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T18:30:07,609 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54179, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T18:30:07,614 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-12T18:30:07,614 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:30:07,616 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C42449%2C1731436206612.meta, suffix=.meta, logDir=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/WALs/9911683f163c,42449,1731436206612, archiveDir=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/oldWALs, maxLogs=32 2024-11-12T18:30:07,618 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C42449%2C1731436206612.meta.1731436207618.meta 2024-11-12T18:30:07,625 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/WALs/9911683f163c,42449,1731436206612/9911683f163c%2C42449%2C1731436206612.meta.1731436207618.meta 2024-11-12T18:30:07,631 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32781:32781),(127.0.0.1/127.0.0.1:44391:44391)] 2024-11-12T18:30:07,632 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:30:07,633 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-12T18:30:07,633 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-12T18:30:07,633 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-12T18:30:07,633 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-12T18:30:07,633 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:07,634 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-12T18:30:07,634 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-12T18:30:07,636 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:30:07,637 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:30:07,637 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:07,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:07,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:30:07,639 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:30:07,639 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:07,640 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:07,640 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:30:07,641 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:30:07,641 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:07,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:07,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:30:07,642 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:30:07,642 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:07,643 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-12T18:30:07,643 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:30:07,644 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/data/hbase/meta/1588230740 2024-11-12T18:30:07,646 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/data/hbase/meta/1588230740 2024-11-12T18:30:07,647 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:30:07,647 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:30:07,648 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T18:30:07,650 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:30:07,651 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=786106, jitterRate=-4.1550397872924805E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T18:30:07,651 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-12T18:30:07,653 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731436207634Writing region info on filesystem at 1731436207634Initializing all the Stores at 1731436207636 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436207636Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436207636Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436207636Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436207636Cleaning up temporary data from old regions at 1731436207647 (+11 ms)Running coprocessor post-open hooks at 1731436207651 (+4 ms)Region opened successfully at 1731436207652 (+1 ms) 2024-11-12T18:30:07,654 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731436207606 2024-11-12T18:30:07,658 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T18:30:07,658 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-12T18:30:07,659 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=9911683f163c,42449,1731436206612 2024-11-12T18:30:07,661 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,42449,1731436206612, state=OPEN 2024-11-12T18:30:07,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:30:07,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:30:07,666 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=9911683f163c,42449,1731436206612 2024-11-12T18:30:07,666 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:30:07,666 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:30:07,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T18:30:07,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,42449,1731436206612 in 214 msec 2024-11-12T18:30:07,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T18:30:07,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 629 msec 2024-11-12T18:30:07,673 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:30:07,673 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-12T18:30:07,675 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:30:07,675 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,42449,1731436206612, seqNum=-1] 2024-11-12T18:30:07,675 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:30:07,677 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58519, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:30:07,685 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 698 msec 2024-11-12T18:30:07,685 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731436207685, completionTime=-1 2024-11-12T18:30:07,685 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-12T18:30:07,685 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-12T18:30:07,687 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-12T18:30:07,687 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731436267687 2024-11-12T18:30:07,687 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731436327687 2024-11-12T18:30:07,687 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-12T18:30:07,688 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,34287,1731436206514-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:07,688 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,34287,1731436206514-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:07,688 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,34287,1731436206514-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:07,688 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9911683f163c:34287, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T18:30:07,688 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:07,688 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:07,690 DEBUG [master/9911683f163c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-12T18:30:07,693 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.981sec 2024-11-12T18:30:07,693 INFO [master/9911683f163c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T18:30:07,693 INFO [master/9911683f163c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T18:30:07,693 INFO [master/9911683f163c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T18:30:07,693 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-12T18:30:07,693 INFO [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T18:30:07,693 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,34287,1731436206514-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:30:07,693 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,34287,1731436206514-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T18:30:07,696 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-12T18:30:07,696 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T18:30:07,696 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,34287,1731436206514-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T18:30:07,770 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@457836e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:30:07,771 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 9911683f163c,34287,-1 for getting cluster id 2024-11-12T18:30:07,771 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-12T18:30:07,774 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2b68faae-0786-42f5-aa84-89701c20ce52' 2024-11-12T18:30:07,775 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-12T18:30:07,775 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2b68faae-0786-42f5-aa84-89701c20ce52" 2024-11-12T18:30:07,776 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12c50cd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:30:07,776 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9911683f163c,34287,-1] 2024-11-12T18:30:07,776 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-12T18:30:07,777 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:07,779 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52990, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-12T18:30:07,781 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fb35389, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:30:07,781 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:30:07,784 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,42449,1731436206612, seqNum=-1] 2024-11-12T18:30:07,785 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:30:07,787 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55162, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:30:07,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=9911683f163c,34287,1731436206514 2024-11-12T18:30:07,792 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:07,796 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-12T18:30:07,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-12T18:30:07,796 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T18:30:07,796 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:30:07,796 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:07,797 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:07,797 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-12T18:30:07,797 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-12T18:30:07,797 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1917248166, stopped=false 2024-11-12T18:30:07,797 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=9911683f163c,34287,1731436206514 2024-11-12T18:30:07,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:30:07,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:07,800 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:30:07,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:30:07,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:07,801 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-12T18:30:07,801 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:30:07,801 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:30:07,801 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:07,801 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:30:07,802 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9911683f163c,42449,1731436206612' ***** 2024-11-12T18:30:07,802 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T18:30:07,802 INFO [RS:0;9911683f163c:42449 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T18:30:07,803 INFO [RS:0;9911683f163c:42449 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T18:30:07,803 INFO [RS:0;9911683f163c:42449 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T18:30:07,803 INFO [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer(959): stopping server 9911683f163c,42449,1731436206612 2024-11-12T18:30:07,803 INFO [RS:0;9911683f163c:42449 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:30:07,803 INFO [RS:0;9911683f163c:42449 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;9911683f163c:42449. 2024-11-12T18:30:07,803 DEBUG [RS:0;9911683f163c:42449 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:30:07,803 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T18:30:07,803 DEBUG [RS:0;9911683f163c:42449 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:07,803 INFO [RS:0;9911683f163c:42449 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-12T18:30:07,803 INFO [RS:0;9911683f163c:42449 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T18:30:07,803 INFO [RS:0;9911683f163c:42449 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T18:30:07,803 INFO [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-12T18:30:07,804 INFO [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T18:30:07,804 DEBUG [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-12T18:30:07,805 DEBUG [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-12T18:30:07,805 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:30:07,805 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:30:07,805 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:30:07,805 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:30:07,805 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:30:07,805 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-12T18:30:07,826 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/data/hbase/meta/1588230740/.tmp/ns/d1feb13737b34c2d80feec75abac3e96 is 43, key is default/ns:d/1731436207678/Put/seqid=0 2024-11-12T18:30:07,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741835_1011 (size=5153) 2024-11-12T18:30:07,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741835_1011 (size=5153) 2024-11-12T18:30:07,838 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/data/hbase/meta/1588230740/.tmp/ns/d1feb13737b34c2d80feec75abac3e96 2024-11-12T18:30:07,846 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/data/hbase/meta/1588230740/.tmp/ns/d1feb13737b34c2d80feec75abac3e96 as hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/data/hbase/meta/1588230740/ns/d1feb13737b34c2d80feec75abac3e96 2024-11-12T18:30:07,855 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/data/hbase/meta/1588230740/ns/d1feb13737b34c2d80feec75abac3e96, entries=2, sequenceid=6, filesize=5.0 K 2024-11-12T18:30:07,856 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 51ms, sequenceid=6, compaction requested=false 2024-11-12T18:30:07,857 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-12T18:30:07,863 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-12T18:30:07,863 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T18:30:07,864 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:30:07,864 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436207805Running coprocessor pre-close hooks at 1731436207805Disabling compacts and flushes for region at 1731436207805Disabling writes for close at 1731436207805Obtaining lock to block concurrent updates at 1731436207805Preparing flush snapshotting stores in 1588230740 at 1731436207805Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731436207806 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731436207807 (+1 ms)Flushing 1588230740/ns: creating writer at 1731436207807Flushing 1588230740/ns: appending metadata at 1731436207826 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1731436207826Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@734e6748: reopening flushed file at 1731436207845 (+19 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 51ms, sequenceid=6, compaction requested=false at 1731436207856 (+11 ms)Writing region close event to WAL at 1731436207858 (+2 ms)Running coprocessor post-close hooks at 1731436207863 (+5 ms)Closed at 1731436207864 (+1 ms) 2024-11-12T18:30:07,864 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-12T18:30:08,005 INFO [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer(976): stopping server 9911683f163c,42449,1731436206612; all regions closed. 
2024-11-12T18:30:08,005 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:08,006 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:08,006 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:08,006 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:08,006 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:08,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741834_1010 (size=1152) 2024-11-12T18:30:08,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741834_1010 (size=1152) 2024-11-12T18:30:08,012 DEBUG [RS:0;9911683f163c:42449 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/oldWALs 2024-11-12T18:30:08,012 INFO [RS:0;9911683f163c:42449 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C42449%2C1731436206612.meta:.meta(num 1731436207618) 2024-11-12T18:30:08,012 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:08,012 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:08,012 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:08,013 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:08,013 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:08,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741833_1009 (size=93) 2024-11-12T18:30:08,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741833_1009 (size=93) 2024-11-12T18:30:08,017 DEBUG [RS:0;9911683f163c:42449 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/oldWALs 2024-11-12T18:30:08,017 INFO [RS:0;9911683f163c:42449 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C42449%2C1731436206612:(num 1731436207265) 2024-11-12T18:30:08,017 DEBUG [RS:0;9911683f163c:42449 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:08,017 INFO [RS:0;9911683f163c:42449 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:30:08,017 INFO [RS:0;9911683f163c:42449 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:30:08,017 INFO [RS:0;9911683f163c:42449 {}] hbase.ChoreService(370): Chore service for: regionserver/9911683f163c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T18:30:08,018 INFO [RS:0;9911683f163c:42449 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:30:08,018 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T18:30:08,018 INFO [RS:0;9911683f163c:42449 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42449 2024-11-12T18:30:08,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9911683f163c,42449,1731436206612 2024-11-12T18:30:08,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:30:08,020 INFO [RS:0;9911683f163c:42449 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:30:08,022 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9911683f163c,42449,1731436206612] 2024-11-12T18:30:08,023 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9911683f163c,42449,1731436206612 already deleted, retry=false 2024-11-12T18:30:08,023 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9911683f163c,42449,1731436206612 expired; onlineServers=0 2024-11-12T18:30:08,024 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '9911683f163c,34287,1731436206514' ***** 2024-11-12T18:30:08,024 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T18:30:08,024 INFO [M:0;9911683f163c:34287 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:30:08,024 INFO [M:0;9911683f163c:34287 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:30:08,024 DEBUG [M:0;9911683f163c:34287 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T18:30:08,024 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-12T18:30:08,024 DEBUG [M:0;9911683f163c:34287 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T18:30:08,024 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436206991 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436206991,5,FailOnTimeoutGroup] 2024-11-12T18:30:08,024 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436206991 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436206991,5,FailOnTimeoutGroup] 2024-11-12T18:30:08,024 INFO [M:0;9911683f163c:34287 {}] hbase.ChoreService(370): Chore service for: master/9911683f163c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-12T18:30:08,024 INFO [M:0;9911683f163c:34287 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:30:08,025 DEBUG [M:0;9911683f163c:34287 {}] master.HMaster(1795): Stopping service threads 2024-11-12T18:30:08,025 INFO [M:0;9911683f163c:34287 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T18:30:08,025 INFO [M:0;9911683f163c:34287 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:30:08,025 INFO [M:0;9911683f163c:34287 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T18:30:08,025 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-12T18:30:08,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T18:30:08,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:08,026 DEBUG [M:0;9911683f163c:34287 {}] zookeeper.ZKUtil(347): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-12T18:30:08,026 WARN [M:0;9911683f163c:34287 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-12T18:30:08,027 INFO [M:0;9911683f163c:34287 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/.lastflushedseqids 2024-11-12T18:30:08,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741836_1012 (size=99) 2024-11-12T18:30:08,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741836_1012 (size=99) 2024-11-12T18:30:08,035 INFO [M:0;9911683f163c:34287 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-12T18:30:08,035 INFO [M:0;9911683f163c:34287 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-12T18:30:08,035 DEBUG [M:0;9911683f163c:34287 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:30:08,035 INFO [M:0;9911683f163c:34287 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:08,036 DEBUG [M:0;9911683f163c:34287 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:08,036 DEBUG [M:0;9911683f163c:34287 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:30:08,036 DEBUG [M:0;9911683f163c:34287 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:08,036 INFO [M:0;9911683f163c:34287 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-12T18:30:08,063 DEBUG [M:0;9911683f163c:34287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/73e6d548fb3e4b21976c003b49f8a29b is 82, key is hbase:meta,,1/info:regioninfo/1731436207659/Put/seqid=0 2024-11-12T18:30:08,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741837_1013 (size=5672) 2024-11-12T18:30:08,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741837_1013 (size=5672) 2024-11-12T18:30:08,070 INFO [M:0;9911683f163c:34287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/73e6d548fb3e4b21976c003b49f8a29b 2024-11-12T18:30:08,093 DEBUG [M:0;9911683f163c:34287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/82b594b799aa447b8237308ecb547321 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731436207684/Put/seqid=0 2024-11-12T18:30:08,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741838_1014 (size=5275) 2024-11-12T18:30:08,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741838_1014 (size=5275) 2024-11-12T18:30:08,100 INFO [M:0;9911683f163c:34287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/82b594b799aa447b8237308ecb547321 2024-11-12T18:30:08,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:30:08,122 INFO [RS:0;9911683f163c:42449 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:30:08,122 INFO [RS:0;9911683f163c:42449 {}] regionserver.HRegionServer(1031): Exiting; 
stopping=9911683f163c,42449,1731436206612; zookeeper connection closed. 2024-11-12T18:30:08,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42449-0x1003541e41e0001, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:30:08,122 DEBUG [M:0;9911683f163c:34287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/27df106525eb4727abe4ccdfe12ebbeb is 69, key is 9911683f163c,42449,1731436206612/rs:state/1731436207095/Put/seqid=0 2024-11-12T18:30:08,122 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1401998e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1401998e 2024-11-12T18:30:08,123 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-12T18:30:08,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741839_1015 (size=5156) 2024-11-12T18:30:08,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741839_1015 (size=5156) 2024-11-12T18:30:08,128 INFO [M:0;9911683f163c:34287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/27df106525eb4727abe4ccdfe12ebbeb 2024-11-12T18:30:08,151 DEBUG [M:0;9911683f163c:34287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/250f98c1cb514c20a0f4023065e81900 is 52, key is load_balancer_on/state:d/1731436207794/Put/seqid=0 2024-11-12T18:30:08,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741840_1016 (size=5056) 2024-11-12T18:30:08,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741840_1016 (size=5056) 2024-11-12T18:30:08,157 INFO [M:0;9911683f163c:34287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/250f98c1cb514c20a0f4023065e81900 2024-11-12T18:30:08,164 DEBUG [M:0;9911683f163c:34287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/73e6d548fb3e4b21976c003b49f8a29b as hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/73e6d548fb3e4b21976c003b49f8a29b 2024-11-12T18:30:08,171 INFO [M:0;9911683f163c:34287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/73e6d548fb3e4b21976c003b49f8a29b, entries=8, sequenceid=29, filesize=5.5 K 2024-11-12T18:30:08,172 DEBUG [M:0;9911683f163c:34287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/82b594b799aa447b8237308ecb547321 as hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/82b594b799aa447b8237308ecb547321 2024-11-12T18:30:08,179 INFO [M:0;9911683f163c:34287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/82b594b799aa447b8237308ecb547321, entries=3, sequenceid=29, filesize=5.2 K 2024-11-12T18:30:08,180 DEBUG [M:0;9911683f163c:34287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/27df106525eb4727abe4ccdfe12ebbeb as hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/27df106525eb4727abe4ccdfe12ebbeb 2024-11-12T18:30:08,186 INFO [M:0;9911683f163c:34287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/27df106525eb4727abe4ccdfe12ebbeb, entries=1, sequenceid=29, filesize=5.0 K 2024-11-12T18:30:08,187 DEBUG [M:0;9911683f163c:34287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/250f98c1cb514c20a0f4023065e81900 as hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/250f98c1cb514c20a0f4023065e81900 2024-11-12T18:30:08,193 INFO [M:0;9911683f163c:34287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40907/user/jenkins/test-data/efed2d7c-2a4d-69c2-6623-b475dd1afde0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/250f98c1cb514c20a0f4023065e81900, entries=1, sequenceid=29, filesize=4.9 K 2024-11-12T18:30:08,195 INFO [M:0;9911683f163c:34287 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 158ms, sequenceid=29, compaction requested=false 2024-11-12T18:30:08,196 INFO [M:0;9911683f163c:34287 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-12T18:30:08,196 DEBUG [M:0;9911683f163c:34287 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436208035Disabling compacts and flushes for region at 1731436208035Disabling writes for close at 1731436208036 (+1 ms)Obtaining lock to block concurrent updates at 1731436208036Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731436208036Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731436208037 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731436208038 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731436208038Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731436208063 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731436208063Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731436208076 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731436208092 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731436208093 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731436208105 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731436208122 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731436208122Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731436208135 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731436208151 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731436208151Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d368007: reopening flushed file at 1731436208163 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b5f99c8: reopening flushed file at 1731436208171 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f1a6d3c: reopening flushed file at 1731436208179 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1289de23: reopening flushed file at 1731436208186 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 158ms, sequenceid=29, compaction requested=false at 1731436208195 (+9 ms)Writing region close event to WAL at 1731436208196 (+1 ms)Closed at 1731436208196 2024-11-12T18:30:08,197 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:08,197 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:08,197 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:08,197 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:08,198 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:08,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44353 is added to blk_1073741830_1006 (size=10311) 2024-11-12T18:30:08,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42129 is added to blk_1073741830_1006 (size=10311) 2024-11-12T18:30:08,201 INFO [M:0;9911683f163c:34287 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-12T18:30:08,201 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T18:30:08,201 INFO [M:0;9911683f163c:34287 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34287 2024-11-12T18:30:08,201 INFO [M:0;9911683f163c:34287 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:30:08,304 INFO [M:0;9911683f163c:34287 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:30:08,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:30:08,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34287-0x1003541e41e0000, quorum=127.0.0.1:53806, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:30:08,307 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c0d07b6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:08,307 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69c60fc1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:30:08,307 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:30:08,308 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7966340b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:30:08,308 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4038b61b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/hadoop.log.dir/,STOPPED} 2024-11-12T18:30:08,315 WARN [BP-1733240676-172.17.0.3-1731436205494 heartbeating to localhost/127.0.0.1:40907 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:30:08,315 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T18:30:08,315 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:30:08,315 WARN [BP-1733240676-172.17.0.3-1731436205494 heartbeating to localhost/127.0.0.1:40907 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1733240676-172.17.0.3-1731436205494 (Datanode Uuid 30d96a07-adf6-4f71-baa8-38b2fd24a825) service to localhost/127.0.0.1:40907 2024-11-12T18:30:08,316 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/cluster_da7640f0-d730-e29e-4541-fbddbcd213df/data/data3/current/BP-1733240676-172.17.0.3-1731436205494 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:08,316 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/cluster_da7640f0-d730-e29e-4541-fbddbcd213df/data/data4/current/BP-1733240676-172.17.0.3-1731436205494 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:08,317 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:30:08,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7853b6ca{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:08,320 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a9173f6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:30:08,320 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:30:08,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f8081e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:30:08,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f2d2177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/hadoop.log.dir/,STOPPED} 2024-11-12T18:30:08,322 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T18:30:08,322 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:30:08,323 WARN [BP-1733240676-172.17.0.3-1731436205494 heartbeating to localhost/127.0.0.1:40907 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:30:08,323 WARN [BP-1733240676-172.17.0.3-1731436205494 heartbeating to localhost/127.0.0.1:40907 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1733240676-172.17.0.3-1731436205494 (Datanode Uuid e1f6d8ff-f752-4bc5-bcb4-8ffcd0068a93) service to localhost/127.0.0.1:40907 2024-11-12T18:30:08,324 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/cluster_da7640f0-d730-e29e-4541-fbddbcd213df/data/data1/current/BP-1733240676-172.17.0.3-1731436205494 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:08,324 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/cluster_da7640f0-d730-e29e-4541-fbddbcd213df/data/data2/current/BP-1733240676-172.17.0.3-1731436205494 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:08,324 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:30:08,331 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@ec92eb9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:30:08,332 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@338bb31f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:30:08,332 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:30:08,332 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47276af2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:30:08,332 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58d2c7f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/hadoop.log.dir/,STOPPED} 2024-11-12T18:30:08,342 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-12T18:30:08,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-12T18:30:08,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-12T18:30:08,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/hadoop.log.dir so I do NOT create it in target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd 2024-11-12T18:30:08,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a0953a4-5955-1eb9-0af0-c4fd6ccd485d/hadoop.tmp.dir so I do NOT create it in target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd 2024-11-12T18:30:08,363 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21, deleteOnExit=true 2024-11-12T18:30:08,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-12T18:30:08,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/test.cache.data in system properties and HBase conf 2024-11-12T18:30:08,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.tmp.dir in system properties and HBase conf 2024-11-12T18:30:08,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir in system properties and HBase conf 2024-11-12T18:30:08,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-12T18:30:08,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-12T18:30:08,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-12T18:30:08,364 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-12T18:30:08,364 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:30:08,364 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:30:08,364 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-12T18:30:08,364 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:30:08,364 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-12T18:30:08,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-12T18:30:08,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:30:08,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:30:08,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-12T18:30:08,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/nfs.dump.dir in system properties and HBase conf 2024-11-12T18:30:08,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/java.io.tmpdir in system properties and HBase conf 2024-11-12T18:30:08,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:30:08,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-12T18:30:08,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-12T18:30:08,380 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-12T18:30:08,454 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:08,460 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:30:08,465 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:30:08,465 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:30:08,465 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:30:08,466 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:08,466 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19e00a63{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:30:08,467 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@404c33{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:30:08,583 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1d0180e0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/java.io.tmpdir/jetty-localhost-35439-hadoop-hdfs-3_4_1-tests_jar-_-any-16127694986413453199/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:30:08,583 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38aa7c9e{HTTP/1.1, (http/1.1)}{localhost:35439} 2024-11-12T18:30:08,583 INFO [Time-limited test {}] server.Server(415): Started @104345ms 2024-11-12T18:30:08,598 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-12T18:30:08,666 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:08,670 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:30:08,671 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:30:08,671 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:30:08,671 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:30:08,673 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b222ecd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:30:08,673 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78fced7d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:30:08,789 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6da0e38{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/java.io.tmpdir/jetty-localhost-43529-hadoop-hdfs-3_4_1-tests_jar-_-any-12474005944363697107/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:08,789 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1b6f9452{HTTP/1.1, (http/1.1)}{localhost:43529} 2024-11-12T18:30:08,789 INFO [Time-limited test {}] server.Server(415): Started @104551ms 2024-11-12T18:30:08,791 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:30:08,829 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:08,833 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:30:08,833 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:30:08,834 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:30:08,834 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T18:30:08,834 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31288d7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:30:08,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57bb4526{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:30:08,904 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data1/current/BP-1467028132-172.17.0.3-1731436208400/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:08,904 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data2/current/BP-1467028132-172.17.0.3-1731436208400/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:08,929 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T18:30:08,933 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb53b3d71337e74bb with lease ID 0x9f8f2543b3312376: Processing first storage report for DS-ceac852d-d1df-4db1-8922-b9f0dd45b386 from datanode DatanodeRegistration(127.0.0.1:41997, datanodeUuid=a2b56ddb-2d65-4ebf-a3e1-e88d80d4bcf1, infoPort=43271, infoSecurePort=0, ipcPort=38581, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400) 2024-11-12T18:30:08,933 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb53b3d71337e74bb with lease ID 0x9f8f2543b3312376: from storage DS-ceac852d-d1df-4db1-8922-b9f0dd45b386 node DatanodeRegistration(127.0.0.1:41997, datanodeUuid=a2b56ddb-2d65-4ebf-a3e1-e88d80d4bcf1, infoPort=43271, infoSecurePort=0, ipcPort=38581, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:08,933 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb53b3d71337e74bb with lease ID 0x9f8f2543b3312376: Processing first storage report for DS-84242f06-5a25-4ba0-ab7c-71383ba52e8d from datanode DatanodeRegistration(127.0.0.1:41997, datanodeUuid=a2b56ddb-2d65-4ebf-a3e1-e88d80d4bcf1, infoPort=43271, infoSecurePort=0, ipcPort=38581, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400) 2024-11-12T18:30:08,933 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb53b3d71337e74bb with lease ID 0x9f8f2543b3312376: from storage DS-84242f06-5a25-4ba0-ab7c-71383ba52e8d node DatanodeRegistration(127.0.0.1:41997, datanodeUuid=a2b56ddb-2d65-4ebf-a3e1-e88d80d4bcf1, infoPort=43271, infoSecurePort=0, ipcPort=38581, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-12T18:30:08,961 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f8cbb64{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/java.io.tmpdir/jetty-localhost-46721-hadoop-hdfs-3_4_1-tests_jar-_-any-1822998864498550826/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:08,961 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33ef8e41{HTTP/1.1, (http/1.1)}{localhost:46721} 2024-11-12T18:30:08,961 INFO [Time-limited test {}] server.Server(415): Started @104723ms 2024-11-12T18:30:08,963 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
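The run above tears down the previous minicluster and immediately brings up a fresh one: HBaseTestingUtil re-seeds the Hadoop/HBase system properties, starts a new mini-DFS with two datanodes behind Jetty web UIs, and the first block reports confirm both storages are registered. A minimal sketch of how a test typically drives this harness, assuming the branch-3 HBaseTestingUtil / StartMiniClusterOption API named in the log (exact method signatures may differ between HBase versions):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterHarnessSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Mirrors the logged option: 1 master, 1 region server, 2 datanodes, 1 ZK server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option); // brings up mini-DFS, MiniZooKeeperCluster and HBase, as logged
        try {
          // ... test body runs against the minicluster here ...
        } finally {
          util.shutdownMiniCluster(); // produces the "Minicluster is down" teardown seen earlier
        }
      }
    }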
2024-11-12T18:30:09,058 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data3/current/BP-1467028132-172.17.0.3-1731436208400/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:09,058 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data4/current/BP-1467028132-172.17.0.3-1731436208400/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:09,078 WARN [Thread-671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T18:30:09,081 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd37bf2e0ee57dc01 with lease ID 0x9f8f2543b3312377: Processing first storage report for DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6 from datanode DatanodeRegistration(127.0.0.1:32921, datanodeUuid=f8f02f19-7c5d-473e-b688-c183935fab36, infoPort=43665, infoSecurePort=0, ipcPort=44109, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400) 2024-11-12T18:30:09,081 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd37bf2e0ee57dc01 with lease ID 0x9f8f2543b3312377: from storage DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6 node DatanodeRegistration(127.0.0.1:32921, datanodeUuid=f8f02f19-7c5d-473e-b688-c183935fab36, infoPort=43665, infoSecurePort=0, ipcPort=44109, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:09,081 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd37bf2e0ee57dc01 with lease ID 0x9f8f2543b3312377: Processing first storage report for DS-2fd37f29-e762-4d9e-bd6b-ea080d970bfd from datanode DatanodeRegistration(127.0.0.1:32921, datanodeUuid=f8f02f19-7c5d-473e-b688-c183935fab36, infoPort=43665, infoSecurePort=0, ipcPort=44109, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400) 2024-11-12T18:30:09,081 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd37bf2e0ee57dc01 with lease ID 0x9f8f2543b3312377: from storage DS-2fd37f29-e762-4d9e-bd6b-ea080d970bfd node DatanodeRegistration(127.0.0.1:32921, datanodeUuid=f8f02f19-7c5d-473e-b688-c183935fab36, infoPort=43665, infoSecurePort=0, ipcPort=44109, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:09,092 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd 2024-11-12T18:30:09,095 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/zookeeper_0, clientPort=50248, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-12T18:30:09,096 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50248 2024-11-12T18:30:09,096 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:09,098 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:09,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41997 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:30:09,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:30:09,110 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1 with version=8 2024-11-12T18:30:09,110 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/hbase-staging 2024-11-12T18:30:09,112 INFO [Time-limited test {}] client.ConnectionUtils(128): master/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:30:09,112 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:09,112 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:09,113 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:30:09,113 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:09,113 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:30:09,113 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-12T18:30:09,113 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:30:09,114 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41511 2024-11-12T18:30:09,115 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41511 connecting to ZooKeeper ensemble=127.0.0.1:50248 2024-11-12T18:30:09,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:415110x0, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:30:09,121 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41511-0x1003541ee640000 connected 2024-11-12T18:30:09,124 INFO [regionserver/9911683f163c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:30:09,142 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:09,144 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:09,147 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:30:09,147 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1, hbase.cluster.distributed=false 2024-11-12T18:30:09,149 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:30:09,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41511 2024-11-12T18:30:09,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41511 2024-11-12T18:30:09,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41511 2024-11-12T18:30:09,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41511 2024-11-12T18:30:09,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41511 2024-11-12T18:30:09,168 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:30:09,168 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:09,168 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:09,168 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:30:09,168 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:09,168 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:30:09,168 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T18:30:09,169 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:30:09,169 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33915 2024-11-12T18:30:09,171 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33915 connecting to ZooKeeper ensemble=127.0.0.1:50248 2024-11-12T18:30:09,171 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:09,174 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:09,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:339150x0, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:30:09,179 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:339150x0, quorum=127.0.0.1:50248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:30:09,179 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33915-0x1003541ee640001 connected 2024-11-12T18:30:09,179 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T18:30:09,180 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T18:30:09,181 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T18:30:09,182 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:30:09,185 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33915 2024-11-12T18:30:09,186 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33915 2024-11-12T18:30:09,190 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33915 2024-11-12T18:30:09,190 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33915 2024-11-12T18:30:09,191 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33915 2024-11-12T18:30:09,204 DEBUG [M:0;9911683f163c:41511 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9911683f163c:41511 2024-11-12T18:30:09,204 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/9911683f163c,41511,1731436209112 2024-11-12T18:30:09,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:30:09,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:30:09,207 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9911683f163c,41511,1731436209112 2024-11-12T18:30:09,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T18:30:09,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:09,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:09,210 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T18:30:09,211 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9911683f163c,41511,1731436209112 from backup master directory 2024-11-12T18:30:09,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:30:09,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9911683f163c,41511,1731436209112 2024-11-12T18:30:09,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:30:09,213 WARN [master/9911683f163c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
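At this point the master has bound its Netty RPC server on port 41511, the region server on 33915, and both have registered znodes with the MiniZooKeeperCluster at 127.0.0.1:50248. Purely as an illustration (not part of the logged test), a client would reach this cluster through that ZooKeeper quorum via the standard Connection API; the quorum host and client port below are copied from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // ensemble from the log
        conf.setInt("hbase.zookeeper.property.clientPort", 50248); // MiniZK clientPort from the log
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Should report the active master registered above: 9911683f163c,41511,1731436209112
          System.out.println(admin.getClusterMetrics().getMasterName());
        }
      }
    }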
2024-11-12T18:30:09,213 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9911683f163c,41511,1731436209112 2024-11-12T18:30:09,218 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/hbase.id] with ID: cad537ed-9b01-41d7-b999-876c072bcf34 2024-11-12T18:30:09,218 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/.tmp/hbase.id 2024-11-12T18:30:09,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:30:09,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41997 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:30:09,231 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/.tmp/hbase.id]:[hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/hbase.id] 2024-11-12T18:30:09,246 INFO [master/9911683f163c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:09,246 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-12T18:30:09,248 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
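With the master now active, the cluster ID file (hbase.id) and the version file exist under the HDFS root directory hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1. A hedged sketch of inspecting that layout with the plain Hadoop FileSystem API (NameNode address and path copied from the log; purely illustrative):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RootDirListingSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37157"), new Configuration());
        Path root = new Path("/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1");
        for (FileStatus status : fs.listStatus(root)) {
          // Expect entries such as hbase.id and hbase.version created by the active master
          System.out.println(status.getPath() + " " + status.getLen());
        }
      }
    }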
2024-11-12T18:30:09,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:09,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:09,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41997 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:30:09,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:30:09,260 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:30:09,262 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T18:30:09,262 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:30:09,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:30:09,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41997 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:30:09,276 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store 2024-11-12T18:30:09,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:30:09,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41997 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:30:09,285 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:09,286 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:30:09,286 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:09,286 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:09,286 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:30:09,286 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:09,286 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
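The master:store descriptor logged above defines four column families: 'info' (3 versions, in-memory, 8 KB blocks, ROW_INDEX_V1 encoding, ROWCOL bloom filter) and 'proc'/'rs'/'state' with default settings (1 version, 64 KB blocks, ROW bloom filter). The same schema expressed with the public TableDescriptorBuilder API, for illustration only (the master local region is created internally, not through client code):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreSchemaSketch {
      static TableDescriptor masterStoreLayout() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            // 'info': 3 versions, in-memory, 8 KB blocks, ROW_INDEX_V1 encoding, ROWCOL bloom filter
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            // 'proc', 'rs', 'state': builder defaults match the logged settings
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
      }
    }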
2024-11-12T18:30:09,286 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436209285Disabling compacts and flushes for region at 1731436209285Disabling writes for close at 1731436209286 (+1 ms)Writing region close event to WAL at 1731436209286Closed at 1731436209286 2024-11-12T18:30:09,287 WARN [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/.initializing 2024-11-12T18:30:09,287 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/WALs/9911683f163c,41511,1731436209112 2024-11-12T18:30:09,291 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C41511%2C1731436209112, suffix=, logDir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/WALs/9911683f163c,41511,1731436209112, archiveDir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/oldWALs, maxLogs=10 2024-11-12T18:30:09,291 INFO [master/9911683f163c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C41511%2C1731436209112.1731436209291 2024-11-12T18:30:09,302 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/WALs/9911683f163c,41511,1731436209112/9911683f163c%2C41511%2C1731436209112.1731436209291 2024-11-12T18:30:09,305 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43271:43271),(127.0.0.1/127.0.0.1:43665:43665)] 2024-11-12T18:30:09,307 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:30:09,307 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:09,307 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:09,307 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:09,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:09,312 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T18:30:09,312 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:09,313 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:09,313 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:09,315 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T18:30:09,315 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:09,315 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:30:09,315 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:09,317 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T18:30:09,317 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:09,317 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:30:09,317 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:09,319 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T18:30:09,319 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:09,320 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:30:09,320 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:09,321 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:09,321 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:09,323 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:09,323 DEBUG [master/9911683f163c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:09,323 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T18:30:09,325 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:09,328 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:30:09,329 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=827200, jitterRate=0.05183990299701691}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T18:30:09,331 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731436209308Initializing all the Stores at 1731436209309 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436209309Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436209310 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436209310Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436209310Cleaning up temporary data from old regions at 1731436209323 (+13 ms)Region opened successfully at 1731436209331 (+8 ms) 2024-11-12T18:30:09,331 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T18:30:09,336 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@356d50f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:30:09,337 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-12T18:30:09,337 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T18:30:09,337 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T18:30:09,338 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T18:30:09,338 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-12T18:30:09,339 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-12T18:30:09,339 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T18:30:09,342 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-12T18:30:09,343 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T18:30:09,345 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-12T18:30:09,345 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T18:30:09,346 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T18:30:09,347 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-12T18:30:09,348 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T18:30:09,349 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T18:30:09,351 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-12T18:30:09,352 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T18:30:09,353 DEBUG 
[master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T18:30:09,355 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T18:30:09,357 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T18:30:09,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:30:09,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:30:09,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:09,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:09,359 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=9911683f163c,41511,1731436209112, sessionid=0x1003541ee640000, setting cluster-up flag (Was=false) 2024-11-12T18:30:09,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:09,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:09,368 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T18:30:09,370 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,41511,1731436209112 2024-11-12T18:30:09,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:09,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:09,378 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T18:30:09,380 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,41511,1731436209112 2024-11-12T18:30:09,381 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-12T18:30:09,383 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-12T18:30:09,384 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-12T18:30:09,384 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-12T18:30:09,384 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9911683f163c,41511,1731436209112 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T18:30:09,385 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:30:09,385 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:30:09,385 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:30:09,385 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:30:09,386 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9911683f163c:0, corePoolSize=10, maxPoolSize=10 2024-11-12T18:30:09,386 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:09,386 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:30:09,386 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9911683f163c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-12T18:30:09,386 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731436239386 2024-11-12T18:30:09,387 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T18:30:09,387 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T18:30:09,387 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T18:30:09,387 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T18:30:09,387 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T18:30:09,387 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T18:30:09,387 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:09,387 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T18:30:09,388 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T18:30:09,388 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:30:09,388 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T18:30:09,388 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-12T18:30:09,388 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T18:30:09,388 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T18:30:09,389 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436209389,5,FailOnTimeoutGroup] 2024-11-12T18:30:09,389 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436209389,5,FailOnTimeoutGroup] 2024-11-12T18:30:09,389 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:09,389 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-12T18:30:09,389 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:09,389 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:09,389 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:09,389 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T18:30:09,392 INFO [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer(746): ClusterId : cad537ed-9b01-41d7-b999-876c072bcf34 2024-11-12T18:30:09,393 DEBUG [RS:0;9911683f163c:33915 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T18:30:09,395 DEBUG [RS:0;9911683f163c:33915 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T18:30:09,395 DEBUG [RS:0;9911683f163c:33915 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T18:30:09,397 DEBUG [RS:0;9911683f163c:33915 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T18:30:09,397 DEBUG [RS:0;9911683f163c:33915 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@503283b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:30:09,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741831_1007 
(size=1321) 2024-11-12T18:30:09,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41997 is added to blk_1073741831_1007 (size=1321) 2024-11-12T18:30:09,404 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-12T18:30:09,404 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1 2024-11-12T18:30:09,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41997 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:30:09,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:30:09,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:09,416 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:30:09,418 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:30:09,418 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:09,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:09,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:30:09,421 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:30:09,421 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:09,422 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:09,422 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:30:09,422 DEBUG [RS:0;9911683f163c:33915 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9911683f163c:33915 2024-11-12T18:30:09,422 INFO [RS:0;9911683f163c:33915 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T18:30:09,422 INFO [RS:0;9911683f163c:33915 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T18:30:09,422 DEBUG [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-12T18:30:09,423 INFO [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,41511,1731436209112 with port=33915, startcode=1731436209168 2024-11-12T18:30:09,423 DEBUG [RS:0;9911683f163c:33915 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T18:30:09,424 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:30:09,424 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:09,424 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:09,424 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:30:09,426 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:30:09,426 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:09,427 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49633, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T18:30:09,427 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:09,427 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:30:09,427 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41511 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9911683f163c,33915,1731436209168 2024-11-12T18:30:09,428 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41511 {}] master.ServerManager(517): Registering regionserver=9911683f163c,33915,1731436209168 2024-11-12T18:30:09,428 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740 2024-11-12T18:30:09,429 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740 2024-11-12T18:30:09,430 DEBUG [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1 2024-11-12T18:30:09,430 DEBUG [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37157 2024-11-12T18:30:09,430 DEBUG [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T18:30:09,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:30:09,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:30:09,431 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T18:30:09,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:30:09,433 DEBUG [RS:0;9911683f163c:33915 {}] zookeeper.ZKUtil(111): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9911683f163c,33915,1731436209168 2024-11-12T18:30:09,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:30:09,433 WARN [RS:0;9911683f163c:33915 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-12T18:30:09,433 INFO [RS:0;9911683f163c:33915 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:30:09,433 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9911683f163c,33915,1731436209168] 2024-11-12T18:30:09,433 DEBUG [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168 2024-11-12T18:30:09,437 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:30:09,438 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=850749, jitterRate=0.08178350329399109}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T18:30:09,438 INFO [RS:0;9911683f163c:33915 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T18:30:09,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731436209414Initializing all the Stores at 1731436209415 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436209415Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436209416 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436209416Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436209416Cleaning up temporary data from old regions at 1731436209431 (+15 ms)Region opened successfully at 1731436209439 (+8 ms) 2024-11-12T18:30:09,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:30:09,439 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:30:09,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:30:09,439 
DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:30:09,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:30:09,440 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:30:09,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436209439Disabling compacts and flushes for region at 1731436209439Disabling writes for close at 1731436209439Writing region close event to WAL at 1731436209440 (+1 ms)Closed at 1731436209440 2024-11-12T18:30:09,441 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:30:09,441 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-12T18:30:09,441 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-12T18:30:09,442 INFO [RS:0;9911683f163c:33915 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T18:30:09,443 INFO [RS:0;9911683f163c:33915 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T18:30:09,443 INFO [RS:0;9911683f163c:33915 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:09,443 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:30:09,444 INFO [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T18:30:09,445 INFO [RS:0;9911683f163c:33915 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T18:30:09,445 INFO [RS:0;9911683f163c:33915 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-12T18:30:09,445 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-12T18:30:09,445 DEBUG [RS:0;9911683f163c:33915 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:09,445 DEBUG [RS:0;9911683f163c:33915 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:09,446 DEBUG [RS:0;9911683f163c:33915 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:09,446 DEBUG [RS:0;9911683f163c:33915 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:09,446 DEBUG [RS:0;9911683f163c:33915 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:09,446 DEBUG [RS:0;9911683f163c:33915 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:30:09,446 DEBUG [RS:0;9911683f163c:33915 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:09,446 DEBUG [RS:0;9911683f163c:33915 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:09,446 DEBUG [RS:0;9911683f163c:33915 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:09,446 DEBUG [RS:0;9911683f163c:33915 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:09,446 DEBUG [RS:0;9911683f163c:33915 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:09,446 DEBUG [RS:0;9911683f163c:33915 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:09,446 DEBUG [RS:0;9911683f163c:33915 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:30:09,446 DEBUG [RS:0;9911683f163c:33915 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:30:09,449 INFO [RS:0;9911683f163c:33915 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:09,449 INFO [RS:0;9911683f163c:33915 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-11-12T18:30:09,449 INFO [RS:0;9911683f163c:33915 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:09,449 INFO [RS:0;9911683f163c:33915 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:09,449 INFO [RS:0;9911683f163c:33915 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:09,449 INFO [RS:0;9911683f163c:33915 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,33915,1731436209168-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:30:09,469 INFO [RS:0;9911683f163c:33915 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T18:30:09,469 INFO [RS:0;9911683f163c:33915 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,33915,1731436209168-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:09,470 INFO [RS:0;9911683f163c:33915 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:09,470 INFO [RS:0;9911683f163c:33915 {}] regionserver.Replication(171): 9911683f163c,33915,1731436209168 started 2024-11-12T18:30:09,486 INFO [RS:0;9911683f163c:33915 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:09,486 INFO [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer(1482): Serving as 9911683f163c,33915,1731436209168, RpcServer on 9911683f163c/172.17.0.3:33915, sessionid=0x1003541ee640001 2024-11-12T18:30:09,486 DEBUG [RS:0;9911683f163c:33915 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T18:30:09,486 DEBUG [RS:0;9911683f163c:33915 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9911683f163c,33915,1731436209168 2024-11-12T18:30:09,486 DEBUG [RS:0;9911683f163c:33915 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,33915,1731436209168' 2024-11-12T18:30:09,486 DEBUG [RS:0;9911683f163c:33915 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T18:30:09,487 DEBUG [RS:0;9911683f163c:33915 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T18:30:09,488 DEBUG [RS:0;9911683f163c:33915 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T18:30:09,488 DEBUG [RS:0;9911683f163c:33915 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T18:30:09,488 DEBUG [RS:0;9911683f163c:33915 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9911683f163c,33915,1731436209168 2024-11-12T18:30:09,488 DEBUG [RS:0;9911683f163c:33915 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,33915,1731436209168' 2024-11-12T18:30:09,488 DEBUG [RS:0;9911683f163c:33915 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T18:30:09,489 DEBUG [RS:0;9911683f163c:33915 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T18:30:09,489 DEBUG 
[RS:0;9911683f163c:33915 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T18:30:09,490 INFO [RS:0;9911683f163c:33915 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T18:30:09,490 INFO [RS:0;9911683f163c:33915 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T18:30:09,585 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T18:30:09,586 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-12T18:30:09,586 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-12T18:30:09,592 INFO [RS:0;9911683f163c:33915 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C33915%2C1731436209168, suffix=, logDir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168, archiveDir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/oldWALs, maxLogs=32 2024-11-12T18:30:09,593 INFO [RS:0;9911683f163c:33915 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C33915%2C1731436209168.1731436209593 2024-11-12T18:30:09,596 WARN [9911683f163c:41511 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-12T18:30:09,601 INFO [RS:0;9911683f163c:33915 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 2024-11-12T18:30:09,608 DEBUG [RS:0;9911683f163c:33915 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43665:43665),(127.0.0.1/127.0.0.1:43271:43271)] 2024-11-12T18:30:09,846 DEBUG [9911683f163c:41511 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-12T18:30:09,847 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9911683f163c,33915,1731436209168 2024-11-12T18:30:09,849 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,33915,1731436209168, state=OPENING 2024-11-12T18:30:09,852 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-12T18:30:09,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:09,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:09,856 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:30:09,856 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:30:09,856 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:30:09,856 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,33915,1731436209168}] 2024-11-12T18:30:09,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:09,873 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:10,010 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T18:30:10,012 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55839, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T18:30:10,017 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-12T18:30:10,017 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:30:10,020 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C33915%2C1731436209168.meta, suffix=.meta, logDir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168, archiveDir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/oldWALs, maxLogs=32 2024-11-12T18:30:10,021 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta 2024-11-12T18:30:10,027 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta 2024-11-12T18:30:10,028 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43271:43271),(127.0.0.1/127.0.0.1:43665:43665)] 2024-11-12T18:30:10,029 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:30:10,029 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-12T18:30:10,030 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-12T18:30:10,030 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-12T18:30:10,030 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-12T18:30:10,030 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:10,030 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-12T18:30:10,030 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-12T18:30:10,032 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:30:10,033 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:30:10,033 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:10,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:10,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:30:10,035 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:30:10,035 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:10,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:10,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:30:10,036 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:30:10,037 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:10,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:10,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:30:10,038 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:30:10,038 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:10,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-12T18:30:10,039 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:30:10,040 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740 2024-11-12T18:30:10,041 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740 2024-11-12T18:30:10,042 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:30:10,042 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:30:10,043 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T18:30:10,045 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:30:10,045 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696215, jitterRate=-0.1147172600030899}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T18:30:10,046 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-12T18:30:10,046 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731436210030Writing region info on filesystem at 1731436210030Initializing all the Stores at 1731436210032 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436210032Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436210032Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436210032Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436210032Cleaning up temporary data from old regions at 1731436210042 (+10 ms)Running coprocessor post-open hooks at 1731436210046 (+4 ms)Region opened successfully at 1731436210046 2024-11-12T18:30:10,048 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731436210009 2024-11-12T18:30:10,051 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T18:30:10,051 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-12T18:30:10,052 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=9911683f163c,33915,1731436209168 2024-11-12T18:30:10,053 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,33915,1731436209168, state=OPEN 2024-11-12T18:30:10,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:30:10,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:30:10,058 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=9911683f163c,33915,1731436209168 2024-11-12T18:30:10,058 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:30:10,058 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:30:10,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T18:30:10,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,33915,1731436209168 in 202 msec 2024-11-12T18:30:10,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T18:30:10,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 621 msec 2024-11-12T18:30:10,067 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:30:10,067 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-12T18:30:10,069 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:30:10,069 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,33915,1731436209168, seqNum=-1] 2024-11-12T18:30:10,069 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:30:10,070 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39419, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:30:10,078 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 693 msec 2024-11-12T18:30:10,078 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731436210078, completionTime=-1 2024-11-12T18:30:10,079 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-12T18:30:10,079 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-12T18:30:10,082 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-12T18:30:10,082 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731436270082 2024-11-12T18:30:10,082 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731436330082 2024-11-12T18:30:10,082 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 3 msec 2024-11-12T18:30:10,082 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41511,1731436209112-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:10,082 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41511,1731436209112-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:10,082 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41511,1731436209112-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:10,082 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9911683f163c:41511, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T18:30:10,082 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:10,083 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:10,084 DEBUG [master/9911683f163c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-12T18:30:10,087 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.874sec 2024-11-12T18:30:10,087 INFO [master/9911683f163c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T18:30:10,087 INFO [master/9911683f163c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T18:30:10,087 INFO [master/9911683f163c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T18:30:10,087 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-12T18:30:10,087 INFO [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T18:30:10,087 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41511,1731436209112-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:30:10,087 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41511,1731436209112-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T18:30:10,090 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-12T18:30:10,090 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T18:30:10,090 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41511,1731436209112-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
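Editorial note on the ScheduledChore lines above: each "Chore ScheduledChore name=..., period=..., unit=... is enabled" entry is a periodic task registered with the master's ChoreService. The sketch below is illustrative only; it uses the hbase-common internal classes ChoreService, ScheduledChore and Stoppable, and the constructor/method signatures are quoted from memory of that API, not taken from this log, so treat them as assumptions.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Minimal Stoppable so the chore can be cancelled, mirroring how HBase services stop chores.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // A chore with period=60000 ms, matching the "period=60000, unit=MILLISECONDS" entries above.
        ScheduledChore chore = new ScheduledChore("demo-chore", stopper, 60000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(chore);
        Thread.sleep(5_000);
        service.shutdown();     // stop all chores scheduled on this service
        stopper.stop("done");
      }
    }

The master in this log schedules its chores (BalancerChore, CatalogJanitor, HbckChore, and so on) in essentially this pattern, each with its own period.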
2024-11-12T18:30:10,110 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57efacc6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:30:10,110 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 9911683f163c,41511,-1 for getting cluster id 2024-11-12T18:30:10,111 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-12T18:30:10,113 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cad537ed-9b01-41d7-b999-876c072bcf34' 2024-11-12T18:30:10,113 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-12T18:30:10,113 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cad537ed-9b01-41d7-b999-876c072bcf34" 2024-11-12T18:30:10,114 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3977a526, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:30:10,114 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9911683f163c,41511,-1] 2024-11-12T18:30:10,114 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-12T18:30:10,114 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:10,116 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39068, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-12T18:30:10,117 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d75c890, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:30:10,118 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:30:10,119 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,33915,1731436209168, seqNum=-1] 2024-11-12T18:30:10,119 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:30:10,121 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42832, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:30:10,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=9911683f163c,41511,1731436209112 2024-11-12T18:30:10,124 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:10,126 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-12T18:30:10,143 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:30:10,143 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:10,143 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:10,143 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:30:10,143 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:10,144 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:30:10,144 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T18:30:10,144 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:30:10,145 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44807 2024-11-12T18:30:10,146 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44807 connecting to ZooKeeper ensemble=127.0.0.1:50248 2024-11-12T18:30:10,147 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:10,148 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:10,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:448070x0, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:30:10,153 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44807-0x1003541ee640002 connected 2024-11-12T18:30:10,153 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:44807-0x1003541ee640002, quorum=127.0.0.1:50248, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-12T18:30:10,153 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-12T18:30:10,154 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T18:30:10,155 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
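Editorial note on the RpcExecutor lines above: "handlerCount=3, maxQueueLength=30" is driven by configuration rather than hard-coded; 30 looks like the usual default of 10 queue slots per handler. The keys below are real HBase settings, but whether this test harness sets exactly these values is an assumption; the sketch is only meant to show where those numbers come from.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcTuningSketch {
      public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.handler.count", 3);            // handlerCount=3 in the log
        conf.setInt("hbase.ipc.server.max.callqueue.length", 30);      // maxQueueLength=30 in the log
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.66f); // splits priority.RWQ into read/write handlers
        return conf;
      }
    }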
2024-11-12T18:30:10,155 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:44807-0x1003541ee640002, quorum=127.0.0.1:50248, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T18:30:10,157 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44807-0x1003541ee640002, quorum=127.0.0.1:50248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:30:10,160 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44807 2024-11-12T18:30:10,161 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44807 2024-11-12T18:30:10,161 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44807 2024-11-12T18:30:10,163 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44807 2024-11-12T18:30:10,163 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44807 2024-11-12T18:30:10,164 INFO [RS:1;9911683f163c:44807 {}] regionserver.HRegionServer(746): ClusterId : cad537ed-9b01-41d7-b999-876c072bcf34 2024-11-12T18:30:10,164 DEBUG [RS:1;9911683f163c:44807 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T18:30:10,166 DEBUG [RS:1;9911683f163c:44807 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T18:30:10,166 DEBUG [RS:1;9911683f163c:44807 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T18:30:10,168 DEBUG [RS:1;9911683f163c:44807 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T18:30:10,169 DEBUG [RS:1;9911683f163c:44807 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8e15db8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:30:10,181 DEBUG [RS:1;9911683f163c:44807 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;9911683f163c:44807 2024-11-12T18:30:10,182 INFO [RS:1;9911683f163c:44807 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T18:30:10,182 INFO [RS:1;9911683f163c:44807 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T18:30:10,182 DEBUG [RS:1;9911683f163c:44807 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-12T18:30:10,183 INFO [RS:1;9911683f163c:44807 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,41511,1731436209112 with port=44807, startcode=1731436210143 2024-11-12T18:30:10,183 DEBUG [RS:1;9911683f163c:44807 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T18:30:10,185 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60255, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T18:30:10,185 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41511 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9911683f163c,44807,1731436210143 2024-11-12T18:30:10,186 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41511 {}] master.ServerManager(517): Registering regionserver=9911683f163c,44807,1731436210143 2024-11-12T18:30:10,187 DEBUG [RS:1;9911683f163c:44807 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1 2024-11-12T18:30:10,187 DEBUG [RS:1;9911683f163c:44807 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37157 2024-11-12T18:30:10,187 DEBUG [RS:1;9911683f163c:44807 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T18:30:10,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:30:10,190 DEBUG [RS:1;9911683f163c:44807 {}] zookeeper.ZKUtil(111): regionserver:44807-0x1003541ee640002, quorum=127.0.0.1:50248, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9911683f163c,44807,1731436210143 2024-11-12T18:30:10,190 WARN [RS:1;9911683f163c:44807 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T18:30:10,190 INFO [RS:1;9911683f163c:44807 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:30:10,190 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9911683f163c,44807,1731436210143] 2024-11-12T18:30:10,190 DEBUG [RS:1;9911683f163c:44807 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143 2024-11-12T18:30:10,194 INFO [RS:1;9911683f163c:44807 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T18:30:10,195 INFO [RS:1;9911683f163c:44807 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T18:30:10,196 INFO [RS:1;9911683f163c:44807 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T18:30:10,196 INFO [RS:1;9911683f163c:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-12T18:30:10,196 INFO [RS:1;9911683f163c:44807 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T18:30:10,197 INFO [RS:1;9911683f163c:44807 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T18:30:10,197 INFO [RS:1;9911683f163c:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:10,197 DEBUG [RS:1;9911683f163c:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:10,197 DEBUG [RS:1;9911683f163c:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:10,198 DEBUG [RS:1;9911683f163c:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:10,198 DEBUG [RS:1;9911683f163c:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:10,198 DEBUG [RS:1;9911683f163c:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:10,198 DEBUG [RS:1;9911683f163c:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:30:10,198 DEBUG [RS:1;9911683f163c:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:10,198 DEBUG [RS:1;9911683f163c:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:10,198 DEBUG [RS:1;9911683f163c:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:10,198 DEBUG [RS:1;9911683f163c:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:10,198 DEBUG [RS:1;9911683f163c:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:10,198 DEBUG [RS:1;9911683f163c:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:10,198 DEBUG [RS:1;9911683f163c:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:30:10,198 DEBUG [RS:1;9911683f163c:44807 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:30:10,198 INFO [RS:1;9911683f163c:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-12T18:30:10,198 INFO [RS:1;9911683f163c:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:10,198 INFO [RS:1;9911683f163c:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:10,198 INFO [RS:1;9911683f163c:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:10,198 INFO [RS:1;9911683f163c:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:10,199 INFO [RS:1;9911683f163c:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,44807,1731436210143-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:30:10,216 INFO [RS:1;9911683f163c:44807 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T18:30:10,216 INFO [RS:1;9911683f163c:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,44807,1731436210143-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:10,216 INFO [RS:1;9911683f163c:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:10,216 INFO [RS:1;9911683f163c:44807 {}] regionserver.Replication(171): 9911683f163c,44807,1731436210143 started 2024-11-12T18:30:10,231 INFO [RS:1;9911683f163c:44807 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:10,231 INFO [RS:1;9911683f163c:44807 {}] regionserver.HRegionServer(1482): Serving as 9911683f163c,44807,1731436210143, RpcServer on 9911683f163c/172.17.0.3:44807, sessionid=0x1003541ee640002 2024-11-12T18:30:10,231 DEBUG [RS:1;9911683f163c:44807 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T18:30:10,231 DEBUG [RS:1;9911683f163c:44807 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9911683f163c,44807,1731436210143 2024-11-12T18:30:10,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;9911683f163c:44807,5,FailOnTimeoutGroup] 2024-11-12T18:30:10,231 DEBUG [RS:1;9911683f163c:44807 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,44807,1731436210143' 2024-11-12T18:30:10,232 DEBUG [RS:1;9911683f163c:44807 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T18:30:10,232 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-12T18:30:10,232 DEBUG [RS:1;9911683f163c:44807 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T18:30:10,232 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-12T18:30:10,233 DEBUG [RS:1;9911683f163c:44807 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T18:30:10,233 DEBUG [RS:1;9911683f163c:44807 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T18:30:10,233 DEBUG [RS:1;9911683f163c:44807 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
9911683f163c,44807,1731436210143 2024-11-12T18:30:10,233 DEBUG [RS:1;9911683f163c:44807 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,44807,1731436210143' 2024-11-12T18:30:10,233 DEBUG [RS:1;9911683f163c:44807 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T18:30:10,233 DEBUG [RS:1;9911683f163c:44807 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T18:30:10,233 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 9911683f163c,41511,1731436209112 2024-11-12T18:30:10,234 DEBUG [RS:1;9911683f163c:44807 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T18:30:10,234 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2c3e3ca9 2024-11-12T18:30:10,234 INFO [RS:1;9911683f163c:44807 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T18:30:10,234 INFO [RS:1;9911683f163c:44807 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T18:30:10,234 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T18:30:10,236 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39076, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T18:30:10,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41511 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-12T18:30:10,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41511 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
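Editorial note on the TableDescriptorChecker warnings above: the keys "hbase.hregion.max.filesize" (786432) and "hbase.hregion.memstore.flush.size" (8192) are quoted directly in the log. A log-rolling test typically shrinks them on purpose to force frequent flushes and store files; where TestLogRolling actually sets them is not shown here, so the sketch below is illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallRegionSketch {
      public static Configuration smallRegions() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.max.filesize", 768 * 1024);       // 786432 bytes: triggers the "too small" split warning
        conf.setLong("hbase.hregion.memstore.flush.size", 8 * 1024);  // 8192 bytes: triggers the frequent-flush warning
        return conf;
      }
    }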
2024-11-12T18:30:10,237 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41511 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:30:10,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41511 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-12T18:30:10,240 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T18:30:10,240 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:10,241 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41511 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-12T18:30:10,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:30:10,242 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T18:30:10,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41997 is added to blk_1073741835_1011 (size=393) 2024-11-12T18:30:10,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741835_1011 (size=393) 2024-11-12T18:30:10,252 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9450309108ebec67e3b1a70fb902d6f1, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1 2024-11-12T18:30:10,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41997 is added to blk_1073741836_1012 (size=76) 2024-11-12T18:30:10,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741836_1012 (size=76) 2024-11-12T18:30:10,261 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:10,261 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 9450309108ebec67e3b1a70fb902d6f1, disabling compactions & flushes 2024-11-12T18:30:10,261 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. 2024-11-12T18:30:10,261 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. 2024-11-12T18:30:10,261 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. after waiting 0 ms 2024-11-12T18:30:10,261 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. 2024-11-12T18:30:10,261 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. 2024-11-12T18:30:10,261 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9450309108ebec67e3b1a70fb902d6f1: Waiting for close lock at 1731436210261Disabling compacts and flushes for region at 1731436210261Disabling writes for close at 1731436210261Writing region close event to WAL at 1731436210261Closed at 1731436210261 2024-11-12T18:30:10,263 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T18:30:10,264 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731436210264"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731436210264"}]},"ts":"1731436210264"} 2024-11-12T18:30:10,267 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
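Editorial note on the CreateTableProcedure above: the client-side equivalent of the descriptor logged in the create request (single 'info' family, VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64KB) would look roughly like the sketch below. This is not the test's actual code, just an illustration using the public HBase client API.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)                 // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                  .setBlocksize(64 * 1024)           // BLOCKSIZE => 64KB
                  .build())
              .build());
        }
      }
    }

The master then runs exactly the procedure chain seen in the log: CREATE_TABLE_PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, UPDATE_DESC_CACHE, POST_OPERATION.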
2024-11-12T18:30:10,268 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T18:30:10,269 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731436210268"}]},"ts":"1731436210268"} 2024-11-12T18:30:10,271 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-12T18:30:10,271 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=9450309108ebec67e3b1a70fb902d6f1, ASSIGN}] 2024-11-12T18:30:10,273 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=9450309108ebec67e3b1a70fb902d6f1, ASSIGN 2024-11-12T18:30:10,275 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=9450309108ebec67e3b1a70fb902d6f1, ASSIGN; state=OFFLINE, location=9911683f163c,33915,1731436209168; forceNewPlan=false, retain=false 2024-11-12T18:30:10,336 INFO [RS:1;9911683f163c:44807 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C44807%2C1731436210143, suffix=, logDir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143, archiveDir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/oldWALs, maxLogs=32 2024-11-12T18:30:10,337 INFO [RS:1;9911683f163c:44807 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C44807%2C1731436210143.1731436210337 2024-11-12T18:30:10,343 INFO [RS:1;9911683f163c:44807 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 2024-11-12T18:30:10,346 DEBUG [RS:1;9911683f163c:44807 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43271:43271),(127.0.0.1/127.0.0.1:43665:43665)] 2024-11-12T18:30:10,412 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-12T18:30:10,416 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:10,426 INFO [9911683f163c:41511 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
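Editorial note on the "WAL configuration: blocksize=256 MB, rollsize=128 MB, ... maxLogs=32" line above: the roll size is normally the WAL block size times hbase.regionserver.logroll.multiplier (default 0.5), which matches 256 MB -> 128 MB here. The key names below are real HBase settings; the values simply restate what the log reports, and whether the test overrides any of them is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSketch {
      public static Configuration walTuning() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size (256 MB)
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at 50% of block size -> 128 MB
        conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32 in the log
        return conf;
      }
    }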
2024-11-12T18:30:10,426 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9450309108ebec67e3b1a70fb902d6f1, regionState=OPENING, regionLocation=9911683f163c,33915,1731436209168 2024-11-12T18:30:10,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=9450309108ebec67e3b1a70fb902d6f1, ASSIGN because future has completed 2024-11-12T18:30:10,430 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9450309108ebec67e3b1a70fb902d6f1, server=9911683f163c,33915,1731436209168}] 2024-11-12T18:30:10,431 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:10,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:10,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:10,589 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. 2024-11-12T18:30:10,589 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9450309108ebec67e3b1a70fb902d6f1, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:30:10,590 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:10,590 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:10,590 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:10,590 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:10,592 INFO [StoreOpener-9450309108ebec67e3b1a70fb902d6f1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:10,593 INFO [StoreOpener-9450309108ebec67e3b1a70fb902d6f1-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9450309108ebec67e3b1a70fb902d6f1 columnFamilyName info 2024-11-12T18:30:10,593 DEBUG [StoreOpener-9450309108ebec67e3b1a70fb902d6f1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:10,594 INFO [StoreOpener-9450309108ebec67e3b1a70fb902d6f1-1 {}] regionserver.HStore(327): Store=9450309108ebec67e3b1a70fb902d6f1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:30:10,594 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:10,595 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:10,595 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:10,595 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:10,595 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:10,597 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:10,599 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:30:10,599 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9450309108ebec67e3b1a70fb902d6f1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856757, jitterRate=0.08942335844039917}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 
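Editorial note on the CompactionConfiguration dump above: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2 and the 604800000 ms major period correspond to standard compaction settings. The key names below are real; the values just restate the defaults the log prints, and nothing here implies the test changes them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration compactionDefaults() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);              // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);             // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);       // ratio 1.200000
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);  // major period: 7 days
        return conf;
      }
    }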
2024-11-12T18:30:10,600 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:10,600 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9450309108ebec67e3b1a70fb902d6f1: Running coprocessor pre-open hook at 1731436210590Writing region info on filesystem at 1731436210590Initializing all the Stores at 1731436210591 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436210591Cleaning up temporary data from old regions at 1731436210595 (+4 ms)Running coprocessor post-open hooks at 1731436210600 (+5 ms)Region opened successfully at 1731436210600 2024-11-12T18:30:10,601 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1., pid=6, masterSystemTime=1731436210584 2024-11-12T18:30:10,604 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. 2024-11-12T18:30:10,604 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. 
2024-11-12T18:30:10,605 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9450309108ebec67e3b1a70fb902d6f1, regionState=OPEN, openSeqNum=2, regionLocation=9911683f163c,33915,1731436209168 2024-11-12T18:30:10,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9450309108ebec67e3b1a70fb902d6f1, server=9911683f163c,33915,1731436209168 because future has completed 2024-11-12T18:30:10,612 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-12T18:30:10,612 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9450309108ebec67e3b1a70fb902d6f1, server=9911683f163c,33915,1731436209168 in 179 msec 2024-11-12T18:30:10,614 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-12T18:30:10,614 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=9450309108ebec67e3b1a70fb902d6f1, ASSIGN in 341 msec 2024-11-12T18:30:10,615 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T18:30:10,616 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731436210615"}]},"ts":"1731436210615"} 2024-11-12T18:30:10,618 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-12T18:30:10,619 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T18:30:10,621 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 382 msec 2024-11-12T18:30:15,532 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-12T18:30:15,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:15,549 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:15,550 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:15,551 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:15,558 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-12T18:30:19,585 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-12T18:30:19,585 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-12T18:30:19,586 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-12T18:30:19,586 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-12T18:30:19,587 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T18:30:19,587 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-12T18:30:20,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:30:20,287 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-12T18:30:20,288 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-12T18:30:20,291 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-12T18:30:20,291 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. 2024-11-12T18:30:20,305 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:20,309 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:30:20,309 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:30:20,309 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:30:20,309 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:30:20,310 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68953e7c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:30:20,310 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10f52e31{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:30:20,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@18da41c3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/java.io.tmpdir/jetty-localhost-33609-hadoop-hdfs-3_4_1-tests_jar-_-any-4472525324715346177/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:20,426 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3158d3ef{HTTP/1.1, (http/1.1)}{localhost:33609} 2024-11-12T18:30:20,426 INFO [Time-limited test {}] server.Server(415): Started @116188ms 2024-11-12T18:30:20,427 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:30:20,463 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:20,467 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:30:20,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:30:20,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:30:20,469 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:30:20,469 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@98b10a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:30:20,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77bdda7a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:30:20,524 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data5/current/BP-1467028132-172.17.0.3-1731436208400/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:20,525 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data6/current/BP-1467028132-172.17.0.3-1731436208400/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:20,551 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T18:30:20,555 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbf0efff2864c6a45 with lease ID 0x9f8f2543b3312378: Processing first storage report for DS-4b1a099b-c591-4a37-9137-23876cb6b31c from datanode DatanodeRegistration(127.0.0.1:34767, datanodeUuid=e73c677b-95b6-4384-996f-29dbbd9a76dd, infoPort=38133, infoSecurePort=0, ipcPort=36133, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400) 2024-11-12T18:30:20,555 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbf0efff2864c6a45 with lease ID 0x9f8f2543b3312378: from storage DS-4b1a099b-c591-4a37-9137-23876cb6b31c node DatanodeRegistration(127.0.0.1:34767, datanodeUuid=e73c677b-95b6-4384-996f-29dbbd9a76dd, infoPort=38133, infoSecurePort=0, ipcPort=36133, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:20,555 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbf0efff2864c6a45 with lease ID 0x9f8f2543b3312378: Processing first storage report for DS-ab0c8879-c51c-4d6a-8c91-c7f53ccc0a9c from datanode DatanodeRegistration(127.0.0.1:34767, datanodeUuid=e73c677b-95b6-4384-996f-29dbbd9a76dd, infoPort=38133, infoSecurePort=0, ipcPort=36133, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400) 2024-11-12T18:30:20,555 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbf0efff2864c6a45 with lease ID 0x9f8f2543b3312378: from storage DS-ab0c8879-c51c-4d6a-8c91-c7f53ccc0a9c node DatanodeRegistration(127.0.0.1:34767, datanodeUuid=e73c677b-95b6-4384-996f-29dbbd9a76dd, infoPort=38133, infoSecurePort=0, ipcPort=36133, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:20,588 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@28e83d63{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/java.io.tmpdir/jetty-localhost-39291-hadoop-hdfs-3_4_1-tests_jar-_-any-17003761504018309660/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:20,588 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@908381a{HTTP/1.1, (http/1.1)}{localhost:39291} 2024-11-12T18:30:20,588 INFO [Time-limited test {}] server.Server(415): Started @116350ms 2024-11-12T18:30:20,590 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:30:20,627 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:20,631 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:30:20,633 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:30:20,633 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:30:20,633 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:30:20,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b24a09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:30:20,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6ea99ccd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:30:20,682 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data7/current/BP-1467028132-172.17.0.3-1731436208400/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:20,683 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data8/current/BP-1467028132-172.17.0.3-1731436208400/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:20,712 WARN [Thread-843 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T18:30:20,714 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9dd8c18d931994e7 with lease ID 0x9f8f2543b3312379: Processing first storage report for DS-b65245f5-106f-4831-85b6-78ca3d6418b3 from datanode DatanodeRegistration(127.0.0.1:41099, datanodeUuid=8eef5b83-1e56-4eb5-b3f6-1199d5227c7c, infoPort=32967, infoSecurePort=0, ipcPort=41825, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400) 2024-11-12T18:30:20,714 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9dd8c18d931994e7 with lease ID 0x9f8f2543b3312379: from storage DS-b65245f5-106f-4831-85b6-78ca3d6418b3 node DatanodeRegistration(127.0.0.1:41099, datanodeUuid=8eef5b83-1e56-4eb5-b3f6-1199d5227c7c, infoPort=32967, infoSecurePort=0, ipcPort=41825, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:20,714 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9dd8c18d931994e7 with lease ID 0x9f8f2543b3312379: Processing first storage report for DS-df29d27a-b027-4934-891c-9abcee6770db from datanode DatanodeRegistration(127.0.0.1:41099, datanodeUuid=8eef5b83-1e56-4eb5-b3f6-1199d5227c7c, infoPort=32967, infoSecurePort=0, ipcPort=41825, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400) 2024-11-12T18:30:20,715 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9dd8c18d931994e7 with lease ID 0x9f8f2543b3312379: from storage DS-df29d27a-b027-4934-891c-9abcee6770db node DatanodeRegistration(127.0.0.1:41099, datanodeUuid=8eef5b83-1e56-4eb5-b3f6-1199d5227c7c, infoPort=32967, infoSecurePort=0, ipcPort=41825, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:20,760 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b920423{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/java.io.tmpdir/jetty-localhost-36777-hadoop-hdfs-3_4_1-tests_jar-_-any-9655463583212756147/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:20,760 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4bf203bb{HTTP/1.1, (http/1.1)}{localhost:36777} 2024-11-12T18:30:20,760 INFO [Time-limited test {}] server.Server(415): Started @116522ms 2024-11-12T18:30:20,762 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-12T18:30:20,843 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data9/current/BP-1467028132-172.17.0.3-1731436208400/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:20,844 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data10/current/BP-1467028132-172.17.0.3-1731436208400/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:20,867 WARN [Thread-878 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T18:30:20,871 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeb2a920a7cede3af with lease ID 0x9f8f2543b331237a: Processing first storage report for DS-e3794d09-5026-4dd9-8405-2324400e0891 from datanode DatanodeRegistration(127.0.0.1:37263, datanodeUuid=fd267263-b1a7-4399-bb1a-388d434d391b, infoPort=42561, infoSecurePort=0, ipcPort=38635, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400) 2024-11-12T18:30:20,871 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeb2a920a7cede3af with lease ID 0x9f8f2543b331237a: from storage DS-e3794d09-5026-4dd9-8405-2324400e0891 node DatanodeRegistration(127.0.0.1:37263, datanodeUuid=fd267263-b1a7-4399-bb1a-388d434d391b, infoPort=42561, infoSecurePort=0, ipcPort=38635, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:20,871 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeb2a920a7cede3af with lease ID 0x9f8f2543b331237a: Processing first storage report for DS-516f219d-6785-4145-b6e4-a2508f512a1c from datanode DatanodeRegistration(127.0.0.1:37263, datanodeUuid=fd267263-b1a7-4399-bb1a-388d434d391b, infoPort=42561, infoSecurePort=0, ipcPort=38635, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400) 2024-11-12T18:30:20,871 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeb2a920a7cede3af with lease ID 0x9f8f2543b331237a: from storage DS-516f219d-6785-4145-b6e4-a2508f512a1c node DatanodeRegistration(127.0.0.1:37263, datanodeUuid=fd267263-b1a7-4399-bb1a-388d434d391b, infoPort=42561, infoSecurePort=0, ipcPort=38635, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:20,884 WARN [ResponseProcessor for block BP-1467028132-172.17.0.3-1731436208400:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1467028132-172.17.0.3-1731436208400:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:20,884 WARN [ResponseProcessor for block BP-1467028132-172.17.0.3-1731436208400:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1467028132-172.17.0.3-1731436208400:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-1467028132-172.17.0.3-1731436208400:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:20,884 WARN [ResponseProcessor for block BP-1467028132-172.17.0.3-1731436208400:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1467028132-172.17.0.3-1731436208400:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1467028132-172.17.0.3-1731436208400:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:20,884 WARN [DataStreamer for file /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/WALs/9911683f163c,41511,1731436209112/9911683f163c%2C41511%2C1731436209112.1731436209291 block BP-1467028132-172.17.0.3-1731436208400:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK], DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]) is bad. 2024-11-12T18:30:20,885 WARN [ResponseProcessor for block BP-1467028132-172.17.0.3-1731436208400:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1467028132-172.17.0.3-1731436208400:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1467028132-172.17.0.3-1731436208400:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:20,885 WARN [PacketResponder: BP-1467028132-172.17.0.3-1731436208400:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:32921] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:20,885 WARN [DataStreamer for file /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta block BP-1467028132-172.17.0.3-1731436208400:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK], DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]) is bad. 2024-11-12T18:30:20,885 WARN [PacketResponder: BP-1467028132-172.17.0.3-1731436208400:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:32921] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T18:30:20,885 WARN [PacketResponder: BP-1467028132-172.17.0.3-1731436208400:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:32921] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:20,886 WARN [DataStreamer for file /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 block BP-1467028132-172.17.0.3-1731436208400:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK], DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]) is bad. 2024-11-12T18:30:20,886 WARN [DataStreamer for file /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 block BP-1467028132-172.17.0.3-1731436208400:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK], DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]) is bad. 
2024-11-12T18:30:20,886 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-736926770_22 at /127.0.0.1:50902 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50902 dst: /127.0.0.1:41997 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:20,886 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:58014 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:32921:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58014 dst: /127.0.0.1:32921 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:20,887 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:50934 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50934 dst: /127.0.0.1:41997 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:20,887 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:50928 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50928 dst: /127.0.0.1:41997 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:20,887 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-405125934_22 at /127.0.0.1:50962 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:41997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50962 dst: /127.0.0.1:41997 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:20,887 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-405125934_22 at /127.0.0.1:58046 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:32921:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58046 dst: /127.0.0.1:32921 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:20,887 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:58006 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:32921:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58006 dst: /127.0.0.1:32921 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T18:30:20,887 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-736926770_22 at /127.0.0.1:57992 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:32921:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57992 dst: /127.0.0.1:32921 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T18:30:20,890 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f8cbb64{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:20,890 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33ef8e41{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:30:20,890 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:30:20,891 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57bb4526{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:30:20,891 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31288d7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir/,STOPPED} 2024-11-12T18:30:20,892 WARN [BP-1467028132-172.17.0.3-1731436208400 heartbeating to localhost/127.0.0.1:37157 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:30:20,892 WARN [BP-1467028132-172.17.0.3-1731436208400 heartbeating to localhost/127.0.0.1:37157 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1467028132-172.17.0.3-1731436208400 (Datanode Uuid f8f02f19-7c5d-473e-b688-c183935fab36) service to localhost/127.0.0.1:37157 2024-11-12T18:30:20,893 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-12T18:30:20,893 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:30:20,893 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data3/current/BP-1467028132-172.17.0.3-1731436208400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:20,893 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data4/current/BP-1467028132-172.17.0.3-1731436208400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:20,893 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:30:20,894 WARN [DataStreamer for file /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 block BP-1467028132-172.17.0.3-1731436208400:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:20,894 WARN [DataStreamer for file /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 block BP-1467028132-172.17.0.3-1731436208400:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:20,894 WARN [DataStreamer for file /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/WALs/9911683f163c,41511,1731436209112/9911683f163c%2C41511%2C1731436209112.1731436209291 block BP-1467028132-172.17.0.3-1731436208400:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:20,895 WARN [DataStreamer for file /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta block BP-1467028132-172.17.0.3-1731436208400:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:20,897 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6da0e38{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:20,897 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1b6f9452{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:30:20,897 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:30:20,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78fced7d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:30:20,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b222ecd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir/,STOPPED} 2024-11-12T18:30:20,899 WARN [BP-1467028132-172.17.0.3-1731436208400 heartbeating to localhost/127.0.0.1:37157 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:30:20,899 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T18:30:20,899 WARN [BP-1467028132-172.17.0.3-1731436208400 heartbeating to localhost/127.0.0.1:37157 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1467028132-172.17.0.3-1731436208400 (Datanode Uuid a2b56ddb-2d65-4ebf-a3e1-e88d80d4bcf1) service to localhost/127.0.0.1:37157 2024-11-12T18:30:20,899 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:30:20,900 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data1/current/BP-1467028132-172.17.0.3-1731436208400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:20,900 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data2/current/BP-1467028132-172.17.0.3-1731436208400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:20,900 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:30:20,905 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1., hostname=9911683f163c,33915,1731436209168, seqNum=2] 2024-11-12T18:30:20,907 ERROR [FSHLog-0-hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1-prefix:9911683f163c,33915,1731436209168 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:20,907 WARN [FSHLog-0-hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1-prefix:9911683f163c,33915,1731436209168 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:20,907 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:20,907 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 9911683f163c%2C33915%2C1731436209168:(num 1731436209593) roll requested 2024-11-12T18:30:20,907 INFO [regionserver/9911683f163c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C33915%2C1731436209168.1731436220907 2024-11-12T18:30:20,914 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:20,914 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:20,914 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:20,914 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:20,914 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:20,915 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436220907 2024-11-12T18:30:20,916 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:20,917 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:20,917 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32967:32967),(127.0.0.1/127.0.0.1:42561:42561)] 2024-11-12T18:30:20,917 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 is not closed yet, will try archiving it next time 2024-11-12T18:30:20,918 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-12T18:30:20,918 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-12T18:30:20,918 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 2024-11-12T18:30:20,921 WARN [IPC Server handler 1 on default port 37157 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-12T18:30:20,925 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 after 5ms 2024-11-12T18:30:21,263 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:22,199 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:22,917 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:22,919 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436220907 2024-11-12T18:30:22,919 WARN [ResponseProcessor for block BP-1467028132-172.17.0.3-1731436208400:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1467028132-172.17.0.3-1731436208400:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:22,920 WARN [DataStreamer for file /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436220907 block BP-1467028132-172.17.0.3-1731436208400:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK], DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]) is bad. 2024-11-12T18:30:22,920 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:44152 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:41099:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44152 dst: /127.0.0.1:41099 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:22,920 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:50616 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:37263:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50616 dst: /127.0.0.1:37263 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
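[editor's note] The RecoverLeaseFSUtils entries just above ("Failed to recover lease, attempt=0 ... after 5ms", and later "attempt=1 ... after 4007ms") show the Close-WAL-Writer thread retrying lease recovery on the old WAL file. A hedged sketch of that retry loop using the public DistributedFileSystem.recoverLease API; the path argument and back-off interval are placeholders, not this test's values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryRetry {
    public static void main(String[] args) throws Exception {
        // Placeholder WAL path; in the log it is .../WALs/<server>/<server>%2C33915%2C....1731436209593
        Path wal = new Path(args[0]);
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(wal.toUri(), new Configuration());
        long start = System.currentTimeMillis();
        boolean recovered = false;
        for (int attempt = 0; !recovered; attempt++) {
            // recoverLease returns true once the NameNode has closed the file.
            recovered = dfs.recoverLease(wal);
            System.out.println((recovered ? "Recovered" : "Failed to recover")
                + " lease, attempt=" + attempt + " after "
                + (System.currentTimeMillis() - start) + "ms");
            if (!recovered) {
                Thread.sleep(4000L); // back off before re-checking, mirroring the ~4s gap in the log
            }
        }
    }
}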
2024-11-12T18:30:22,922 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@28e83d63{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:22,922 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@908381a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:30:22,922 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:30:22,922 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77bdda7a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:30:22,922 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@98b10a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir/,STOPPED} 2024-11-12T18:30:22,924 WARN [BP-1467028132-172.17.0.3-1731436208400 heartbeating to localhost/127.0.0.1:37157 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:30:22,924 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-12T18:30:22,924 WARN [BP-1467028132-172.17.0.3-1731436208400 heartbeating to localhost/127.0.0.1:37157 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1467028132-172.17.0.3-1731436208400 (Datanode Uuid 8eef5b83-1e56-4eb5-b3f6-1199d5227c7c) service to localhost/127.0.0.1:37157 2024-11-12T18:30:22,924 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:30:22,924 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data7/current/BP-1467028132-172.17.0.3-1731436208400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:22,924 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data8/current/BP-1467028132-172.17.0.3-1731436208400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:22,924 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:30:23,263 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:24,199 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:24,918 WARN [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]] 2024-11-12T18:30:24,918 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:24,918 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 9911683f163c%2C33915%2C1731436209168:(num 1731436220907) roll requested 2024-11-12T18:30:24,919 INFO [regionserver/9911683f163c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C33915%2C1731436209168.1731436224919 2024-11-12T18:30:24,922 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:24,922 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]) is bad. 2024-11-12T18:30:24,922 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741839_1021 2024-11-12T18:30:24,925 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK] 2024-11-12T18:30:24,925 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 after 4007ms 2024-11-12T18:30:24,928 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:24,929 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK], DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]) is bad. 2024-11-12T18:30:24,929 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741840_1022 2024-11-12T18:30:24,929 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T18:30:24,929 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK] 2024-11-12T18:30:24,931 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:24,931 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK], DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]) is bad. 2024-11-12T18:30:24,931 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741841_1023 2024-11-12T18:30:24,931 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK] 2024-11-12T18:30:24,936 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:24,936 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:24,936 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:24,936 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:24,936 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:24,936 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436220907 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436224919 2024-11-12T18:30:24,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37263 is added to blk_1073741838_1020 (size=3600) 2024-11-12T18:30:24,943 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42561:42561),(127.0.0.1/127.0.0.1:38133:38133)] 2024-11-12T18:30:24,943 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 is not closed yet, will try archiving it next time 2024-11-12T18:30:24,943 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436220907 is not closed yet, will try archiving it next time 2024-11-12T18:30:25,264 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:25,339 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 is not closed yet, will try archiving it next time 2024-11-12T18:30:26,200 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:26,884 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1452931c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37263, datanodeUuid=fd267263-b1a7-4399-bb1a-388d434d391b, infoPort=42561, infoSecurePort=0, ipcPort=38635, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400):Failed to transfer BP-1467028132-172.17.0.3-1731436208400:blk_1073741838_1020 to 127.0.0.1:32921 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:26,933 WARN [ResponseProcessor for block BP-1467028132-172.17.0.3-1731436208400:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1467028132-172.17.0.3-1731436208400:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:26,933 WARN [DataStreamer for file /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436224919 block BP-1467028132-172.17.0.3-1731436208400:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK], DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]) is bad. 2024-11-12T18:30:26,934 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:50632 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:37263:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50632 dst: /127.0.0.1:37263 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T18:30:26,934 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:52618 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:34767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52618 dst: /127.0.0.1:34767 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:26,936 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b920423{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:26,936 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4bf203bb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:30:26,936 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:30:26,936 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6ea99ccd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:30:26,936 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b24a09{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir/,STOPPED} 2024-11-12T18:30:26,938 WARN [BP-1467028132-172.17.0.3-1731436208400 heartbeating to localhost/127.0.0.1:37157 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:30:26,938 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
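[editor's note] Each cluster of "Stopped o.e.j.w.WebAppContext ... datanode" / "Ending block pool service" entries marks the test shutting down another datanode of its embedded cluster, which is what keeps forcing the pipeline recovery and WAL rolls seen around it. A hedged sketch of how a test in the TestLogRolling style might do this with the standard MiniDFSCluster test harness (node count and index below are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class StopDataNodeSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
        try {
            cluster.waitActive();
            // Stopping a datanode while a writer holds an open WAL block forces the
            // client-side pipeline recovery and log-roll behaviour in the surrounding entries.
            cluster.stopDataNode(0);
        } finally {
            cluster.shutdown();
        }
    }
}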
2024-11-12T18:30:26,938 WARN [BP-1467028132-172.17.0.3-1731436208400 heartbeating to localhost/127.0.0.1:37157 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1467028132-172.17.0.3-1731436208400 (Datanode Uuid fd267263-b1a7-4399-bb1a-388d434d391b) service to localhost/127.0.0.1:37157 2024-11-12T18:30:26,938 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:30:26,938 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data9/current/BP-1467028132-172.17.0.3-1731436208400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:26,939 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data10/current/BP-1467028132-172.17.0.3-1731436208400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:26,939 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:30:26,943 WARN [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]] 2024-11-12T18:30:26,943 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:26,944 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 9911683f163c%2C33915%2C1731436209168:(num 1731436224919) roll requested 2024-11-12T18:30:26,944 INFO [regionserver/9911683f163c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C33915%2C1731436209168.1731436226944 2024-11-12T18:30:26,947 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:26,947 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]) is bad. 2024-11-12T18:30:26,947 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741843_1026 2024-11-12T18:30:26,948 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK] 2024-11-12T18:30:26,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33915 {}] regionserver.HRegion(8855): Flush requested on 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:26,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9450309108ebec67e3b1a70fb902d6f1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-12T18:30:26,950 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:26,950 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK], DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]) is bad. 
2024-11-12T18:30:26,950 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741844_1027 2024-11-12T18:30:26,950 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK] 2024-11-12T18:30:26,954 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32921 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:26,954 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:33246 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data6]'}, localName='127.0.0.1:34767', datanodeUuid='e73c677b-95b6-4384-996f-29dbbd9a76dd', xmitsInProgress=0}:Exception transferring block BP-1467028132-172.17.0.3-1731436208400:blk_1073741845_1028 to mirror 127.0.0.1:32921 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:26,954 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK], DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]) is bad. 
2024-11-12T18:30:26,954 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741845_1028 2024-11-12T18:30:26,955 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:33246 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-12T18:30:26,955 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:33246 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:34767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33246 dst: /127.0.0.1:34767 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:26,955 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK] 2024-11-12T18:30:26,957 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:26,957 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK], DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]) is bad. 
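[editor's note] The "Abandoning blk_... / Excluding datanode ..." sequence above is the client's pipeline-setup recovery discarding dead nodes one at a time. How aggressively a DFS client replaces failed pipeline datanodes is controlled by the standard dfs.client.block.write.replace-datanode-on-failure.* settings; the sketch below only illustrates those keys, the values shown are not what this test used:

import org.apache.hadoop.conf.Configuration;

public class PipelineRecoveryConf {
    public static Configuration clientConf() {
        Configuration conf = new Configuration();
        // Whether the client tries to replace a failed datanode in the write pipeline at all.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT replaces nodes only for wider pipelines; ALWAYS and NEVER are the other policies.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // Keep writing with the remaining nodes if no replacement can be found,
        // instead of failing the stream outright.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
    }
}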
2024-11-12T18:30:26,957 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741846_1029 2024-11-12T18:30:26,958 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK] 2024-11-12T18:30:26,958 WARN [IPC Server handler 4 on default port 37157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T18:30:26,958 WARN [IPC Server handler 4 on default port 37157 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T18:30:26,959 WARN [IPC Server handler 4 on default port 37157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T18:30:26,969 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:26,969 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:26,970 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:26,970 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:26,970 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:26,970 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436224919 with entries=12, filesize=12.96 KB; new WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436226944 2024-11-12T18:30:26,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/18db97398b1c4e568cc39a95d4e04b66 is 1080, key is row0002/info:/1731436222926/Put/seqid=0 2024-11-12T18:30:26,972 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38133:38133)] 2024-11-12T18:30:26,972 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 is not closed yet, will try archiving it next time 2024-11-12T18:30:26,972 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741842_1025 (size=13274) 2024-11-12T18:30:26,972 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436224919 is not closed yet, will try archiving it next time 2024-11-12T18:30:26,972 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:26,973 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK], DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]) is bad. 2024-11-12T18:30:26,973 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741848_1031 2024-11-12T18:30:26,973 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK] 2024-11-12T18:30:26,974 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:30:26,975 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK], DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]) is bad. 2024-11-12T18:30:26,975 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741849_1032 2024-11-12T18:30:26,975 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK] 2024-11-12T18:30:26,976 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:26,976 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK], DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]) is bad. 2024-11-12T18:30:26,976 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741850_1033 2024-11-12T18:30:26,977 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK] 2024-11-12T18:30:26,981 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41997 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
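[editor's note] The three back-to-back failures above (blk_1073741848_1031 through blk_1073741850_1033) trace the client-side recovery loop: each createBlockOutputStream attempt fails with Connection refused, the first datanode of the proposed pipeline is declared bad, the block is abandoned, that datanode is added to an exclusion list, and a fresh block is requested. The plain-Java sketch below models that loop in a much simplified form; it is illustrative only, does not reflect the real org.apache.hadoop.hdfs.DataStreamer code, and the BlockAllocator/PipelineConnector helpers are hypothetical stand-ins.

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Simplified model of the "abandon block / exclude datanode / retry" cycle
// visible in the DataStreamer WARN lines above. Illustrative only.
public class PipelineRecoverySketch {

    /** Hypothetical stand-in for asking the NameNode for a new block pipeline. */
    interface BlockAllocator {
        List<String> allocatePipeline(Set<String> excludedNodes);
    }

    /** Hypothetical stand-in for opening the stream to the first pipeline node. */
    interface PipelineConnector {
        boolean connect(List<String> pipeline); // false == "Connection refused"
    }

    static List<String> setUpPipelineWithRetries(BlockAllocator allocator,
                                                 PipelineConnector connector,
                                                 int maxAttempts) {
        Set<String> excluded = new LinkedHashSet<>();
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            List<String> pipeline = allocator.allocatePipeline(excluded);
            if (pipeline.isEmpty()) {
                break; // NameNode could not place the block at all
            }
            if (connector.connect(pipeline)) {
                return pipeline; // block output stream established
            }
            // "datanode 0(...) is bad." -> abandon the block and exclude that node
            String badNode = pipeline.get(0);
            excluded.add(badNode);
            System.out.println("Abandoning block, excluding datanode " + badNode);
        }
        return new ArrayList<>(); // mirrors "All datanodes ... are bad. Aborting..."
    }

    public static void main(String[] args) {
        // Addresses taken from the log: only 34767 is still reachable.
        List<String> live = List.of("127.0.0.1:34767");
        List<String> dead = List.of("127.0.0.1:41099", "127.0.0.1:37263", "127.0.0.1:32921");
        BlockAllocator allocator = excludedNodes -> {
            List<String> pipeline = new ArrayList<>();
            for (String dn : dead) if (!excludedNodes.contains(dn)) pipeline.add(dn);
            for (String dn : live) if (!excludedNodes.contains(dn)) pipeline.add(dn);
            return pipeline.size() >= 2 ? pipeline.subList(0, 2) : pipeline;
        };
        PipelineConnector connector = pipeline -> live.contains(pipeline.get(0));
        System.out.println("Final pipeline: " + setUpPipelineWithRetries(allocator, connector, 5));
    }
}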
2024-11-12T18:30:26,981 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:33266 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data6]'}, localName='127.0.0.1:34767', datanodeUuid='e73c677b-95b6-4384-996f-29dbbd9a76dd', xmitsInProgress=0}:Exception transferring block BP-1467028132-172.17.0.3-1731436208400:blk_1073741851_1034 to mirror 127.0.0.1:41997 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:26,981 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK], DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]) is bad. 2024-11-12T18:30:26,981 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741851_1034 2024-11-12T18:30:26,981 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:33266 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-12T18:30:26,982 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:33266 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:34767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33266 dst: /127.0.0.1:34767 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:26,982 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK] 2024-11-12T18:30:26,983 WARN [IPC Server handler 0 on default port 37157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T18:30:26,983 WARN [IPC Server handler 0 on default port 37157 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T18:30:26,983 WARN [IPC Server handler 0 on default port 37157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T18:30:26,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741852_1035 (size=10347) 2024-11-12T18:30:27,264 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
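[editor's note] The master WAL roller's "All datanodes ... are bad. Aborting..." above means the append pipeline has run out of replacement datanodes, and the NameNode-side "Failed to place enough replicas" warnings show new blocks cannot reach replication 2 either. How a client reacts to such pipeline failures is governed by the HDFS replace-datanode-on-failure settings and the replication factor. The snippet below is a minimal sketch of how such a client configuration could be assembled; the keys are standard HDFS client keys, but the specific values are illustrative assumptions for a small 2-replica test cluster, not what this test actually sets.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.net.URI;

// Minimal sketch: client-side settings that control how an HDFS write pipeline
// reacts when datanodes fail mid-write. Values are illustrative for a small
// 2-replica test cluster, not the configuration used by this test run.
public class PipelineFailureConfigSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // Two replicas per block, matching the "still in need of 1 to reach 2" messages.
        conf.setInt("dfs.replication", 2);

        // Whether the client tries to swap in a replacement datanode on pipeline failure.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);

        // NEVER / DEFAULT / ALWAYS: how aggressively replacements are requested.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");

        // If true, keep writing with fewer datanodes instead of failing with
        // "All datanodes ... are bad. Aborting..." when no replacement is available.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

        // Port 37157 is the NameNode port seen throughout this log.
        try (FileSystem fs = FileSystem.get(new URI("hdfs://localhost:37157"), conf)) {
            System.out.println("Default replication seen by the client: "
                + fs.getDefaultReplication(new Path("/")));
        }
    }
}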
2024-11-12T18:30:27,373 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 is not closed yet, will try archiving it next time 2024-11-12T18:30:27,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/18db97398b1c4e568cc39a95d4e04b66 2024-11-12T18:30:27,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/18db97398b1c4e568cc39a95d4e04b66 as hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/18db97398b1c4e568cc39a95d4e04b66 2024-11-12T18:30:27,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/18db97398b1c4e568cc39a95d4e04b66, entries=5, sequenceid=11, filesize=10.1 K 2024-11-12T18:30:27,402 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 9450309108ebec67e3b1a70fb902d6f1 in 453ms, sequenceid=11, compaction requested=false 2024-11-12T18:30:27,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9450309108ebec67e3b1a70fb902d6f1: 2024-11-12T18:30:27,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33915 {}] regionserver.HRegion(8855): Flush requested on 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:27,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9450309108ebec67e3b1a70fb902d6f1 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-12T18:30:27,580 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/e3626b6a7b58487b9e249c1f86d904b2 is 1080, key is row0007/info:/1731436226950/Put/seqid=0 2024-11-12T18:30:27,587 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:27,587 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK], DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]) is bad. 2024-11-12T18:30:27,587 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741853_1036 2024-11-12T18:30:27,588 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK] 2024-11-12T18:30:27,590 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:27,590 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK], DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]) is bad. 2024-11-12T18:30:27,590 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741854_1037 2024-11-12T18:30:27,590 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK] 2024-11-12T18:30:27,592 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:27,592 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK], DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]) is bad. 2024-11-12T18:30:27,592 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741855_1038 2024-11-12T18:30:27,593 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK] 2024-11-12T18:30:27,595 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:27,595 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK], DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]) is bad. 
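[editor's note] The recurring "Failed to place enough replicas ... only 0 storage types can be selected" warnings around this point are the NameNode-side view of the same outage: under the HOT storage policy a brand-new block may only be placed on DISK storages (creationFallbacks=[] in the messages; ARCHIVE appears only as a replication fallback), so once every remaining DISK datanode has been excluded by the client there is nowhere to put the second replica. The following is a rough, illustrative model of that arithmetic, not the actual BlockPlacementPolicyDefault code.

import java.util.List;

// Rough model of why the NameNode logs "still in need of 1 to reach 2":
// for a new block under the HOT policy the number of placeable replicas is
// capped by the number of reachable, non-excluded DISK datanodes.
public class PlacementShortfallSketch {

    static int replicasStillNeeded(int requiredReplicas, List<String> reachableDiskNodes) {
        int placeable = Math.min(requiredReplicas, reachableDiskNodes.size());
        return requiredReplicas - placeable;
    }

    public static void main(String[] args) {
        // After the client has excluded 41099, 37263 and 32921, only 34767 remains.
        List<String> reachable = List.of("127.0.0.1:34767");
        int missing = replicasStillNeeded(2, reachable);
        System.out.println("Failed to place enough replicas, still in need of "
            + missing + " to reach 2");
    }
}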
2024-11-12T18:30:27,595 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741856_1039 2024-11-12T18:30:27,595 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK] 2024-11-12T18:30:27,596 WARN [IPC Server handler 3 on default port 37157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T18:30:27,596 WARN [IPC Server handler 3 on default port 37157 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T18:30:27,596 WARN [IPC Server handler 3 on default port 37157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T18:30:27,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741857_1040 (size=12506) 2024-11-12T18:30:27,601 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/e3626b6a7b58487b9e249c1f86d904b2 2024-11-12T18:30:27,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/e3626b6a7b58487b9e249c1f86d904b2 as hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/e3626b6a7b58487b9e249c1f86d904b2 2024-11-12T18:30:27,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/e3626b6a7b58487b9e249c1f86d904b2, entries=7, sequenceid=24, filesize=12.2 K 2024-11-12T18:30:27,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 9450309108ebec67e3b1a70fb902d6f1 in 50ms, sequenceid=24, compaction requested=false 2024-11-12T18:30:27,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
9450309108ebec67e3b1a70fb902d6f1: 2024-11-12T18:30:27,625 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-12T18:30:27,625 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:30:27,625 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/e3626b6a7b58487b9e249c1f86d904b2 because midkey is the same as first or last row 2024-11-12T18:30:28,200 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:28,972 WARN [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]] 2024-11-12T18:30:28,972 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:28,973 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 9911683f163c%2C33915%2C1731436209168:(num 1731436226944) roll requested 2024-11-12T18:30:28,973 INFO [regionserver/9911683f163c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C33915%2C1731436209168.1731436228973 2024-11-12T18:30:28,976 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:28,976 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK], DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]) is bad. 2024-11-12T18:30:28,976 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741858_1041 2024-11-12T18:30:28,977 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK] 2024-11-12T18:30:28,978 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:28,978 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]) is bad. 2024-11-12T18:30:28,978 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741859_1042 2024-11-12T18:30:28,980 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK] 2024-11-12T18:30:28,982 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41099 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:28,982 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:33312 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data6]'}, localName='127.0.0.1:34767', datanodeUuid='e73c677b-95b6-4384-996f-29dbbd9a76dd', xmitsInProgress=0}:Exception transferring block BP-1467028132-172.17.0.3-1731436208400:blk_1073741860_1043 to mirror 127.0.0.1:41099 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:28,982 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]) is bad. 2024-11-12T18:30:28,983 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741860_1043 2024-11-12T18:30:28,983 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:33312 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-12T18:30:28,983 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:33312 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:34767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33312 dst: /127.0.0.1:34767 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:28,983 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK] 2024-11-12T18:30:28,990 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32921 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:28,990 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:33328 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data6]'}, localName='127.0.0.1:34767', datanodeUuid='e73c677b-95b6-4384-996f-29dbbd9a76dd', xmitsInProgress=0}:Exception transferring block BP-1467028132-172.17.0.3-1731436208400:blk_1073741861_1044 to mirror 127.0.0.1:32921 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:28,990 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK], DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]) is bad. 2024-11-12T18:30:28,991 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741861_1044 2024-11-12T18:30:28,991 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:33328 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-12T18:30:28,991 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:33328 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:34767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33328 dst: /127.0.0.1:34767 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T18:30:28,991 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK] 2024-11-12T18:30:28,992 WARN [IPC Server handler 1 on default port 37157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T18:30:28,992 WARN [IPC Server handler 1 on default port 37157 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T18:30:28,992 WARN [IPC Server handler 1 on default port 37157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T18:30:28,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33915 {}] regionserver.HRegion(8855): Flush requested on 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:28,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9450309108ebec67e3b1a70fb902d6f1 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-12T18:30:29,008 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:29,008 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:29,008 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:29,008 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:29,008 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:29,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/f22ebfff33844072aeeb78a1d5485aac is 1079, key is tmprow/info:/1731436228996/Put/seqid=0 2024-11-12T18:30:29,009 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436226944 with entries=14, filesize=12.82 KB; new WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436228973 2024-11-12T18:30:29,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741847_1030 (size=13133) 2024-11-12T18:30:29,011 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38133:38133)] 
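[editor's note] The stretch from the earlier "HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL" warning through the "Rolled WAL ..." line just above is HBase's WAL-level reaction to the degraded pipeline: when the current writer's pipeline drops below the tolerable replication, the log roller closes the writer and opens a new WAL file in the hope of obtaining a healthier pipeline. The sketch below models that decision in simplified form; it is not the actual FSHLog code, and the configuration key named in the comment (hbase.regionserver.hlog.tolerable.lowreplication) is mentioned as the usual knob but should be treated as an assumption here.

// Simplified model of the low-replication check behind the
// "Requesting close of WAL" / roll behaviour seen above. Illustrative only;
// the real logic lives in org.apache.hadoop.hbase.regionserver.wal.FSHLog.
public class WalLowReplicationRollSketch {

    private final int tolerableReplication; // assumed: hbase.regionserver.hlog.tolerable.lowreplication
    private final int rollLimit;            // stop requesting rolls after this many consecutive hits
    private int consecutiveLowReplication = 0;

    WalLowReplicationRollSketch(int tolerableReplication, int rollLimit) {
        this.tolerableReplication = tolerableReplication;
        this.rollLimit = rollLimit;
    }

    /** Returns true if a WAL roll should be requested for the current pipeline. */
    boolean shouldRequestRoll(int currentPipelineReplicas) {
        if (currentPipelineReplicas < tolerableReplication) {
            consecutiveLowReplication++;
            return consecutiveLowReplication <= rollLimit;
        }
        consecutiveLowReplication = 0;
        return false;
    }

    public static void main(String[] args) {
        WalLowReplicationRollSketch wal = new WalLowReplicationRollSketch(2, 5);
        // Pipeline shown in the log: [DatanodeInfoWithStorage[127.0.0.1:34767,...]] -> 1 replica.
        System.out.println("Roll requested: " + wal.shouldRequestRoll(1)); // true
    }
}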
2024-11-12T18:30:29,011 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 is not closed yet, will try archiving it next time 2024-11-12T18:30:29,011 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436226944 is not closed yet, will try archiving it next time 2024-11-12T18:30:29,011 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:29,011 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK], DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]) is bad. 2024-11-12T18:30:29,011 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741863_1046 2024-11-12T18:30:29,011 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436220907 to hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/oldWALs/9911683f163c%2C33915%2C1731436209168.1731436220907 2024-11-12T18:30:29,012 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK] 2024-11-12T18:30:29,013 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436224919 to hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/oldWALs/9911683f163c%2C33915%2C1731436209168.1731436224919 2024-11-12T18:30:29,013 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:29,014 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK], DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]) is bad. 2024-11-12T18:30:29,014 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741864_1047 2024-11-12T18:30:29,014 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK] 2024-11-12T18:30:29,015 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:29,015 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK], DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]) is bad. 2024-11-12T18:30:29,015 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741865_1048 2024-11-12T18:30:29,016 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK] 2024-11-12T18:30:29,017 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:29,017 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK], DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]) is bad. 2024-11-12T18:30:29,017 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741866_1049 2024-11-12T18:30:29,018 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK] 2024-11-12T18:30:29,018 WARN [IPC Server handler 0 on default port 37157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T18:30:29,018 WARN [IPC Server handler 0 on default port 37157 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T18:30:29,018 WARN [IPC Server handler 0 on default port 37157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T18:30:29,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741867_1050 (size=6027) 2024-11-12T18:30:29,265 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:29,412 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 is not closed yet, will try archiving it next time 2024-11-12T18:30:29,428 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/f22ebfff33844072aeeb78a1d5485aac 2024-11-12T18:30:29,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/f22ebfff33844072aeeb78a1d5485aac as hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/f22ebfff33844072aeeb78a1d5485aac 2024-11-12T18:30:29,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/f22ebfff33844072aeeb78a1d5485aac, entries=1, sequenceid=34, filesize=5.9 K 2024-11-12T18:30:29,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 9450309108ebec67e3b1a70fb902d6f1 in 447ms, sequenceid=34, compaction requested=true 2024-11-12T18:30:29,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9450309108ebec67e3b1a70fb902d6f1: 2024-11-12T18:30:29,444 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-12T18:30:29,444 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:30:29,444 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/e3626b6a7b58487b9e249c1f86d904b2 because midkey is the same as first or last row 2024-11-12T18:30:29,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9450309108ebec67e3b1a70fb902d6f1:info, priority=-2147483648, current under compaction store size is 1 2024-11-12T18:30:29,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
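[editor's note] The split-policy DEBUG lines above ("Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K" followed by "cannot split ... because midkey is the same as first or last row") show two separate gates: the size check passes, but the proposed split point is rejected because the largest store file's midkey does not fall strictly between its first and last row, so splitting there would yield an empty daughter region. In normal clusters the size threshold is typically derived from hbase.hregion.max.filesize (scaled by region count for the default IncreasingToUpperBound policy); the tiny 16 K value here is presumably set very low by the test itself. The snippet below is a hedged, simplified model of the two checks, not the actual HBase split-policy implementation.

import java.util.Arrays;

// Simplified model of the two gates visible in the split-policy DEBUG lines:
// (1) is the region big enough to split, and (2) is there a usable split point
// (the midkey must differ from the store file's first and last row).
public class SplitDecisionSketch {

    static boolean shouldSplit(long[] storeFileSizes, long sizeToCheck) {
        long sumSize = Arrays.stream(storeFileSizes).sum();
        return sumSize > sizeToCheck;
    }

    static boolean hasUsableSplitPoint(String firstRow, String midKey, String lastRow) {
        // "cannot split ... because midkey is the same as first or last row"
        return !midKey.equals(firstRow) && !midKey.equals(lastRow);
    }

    public static void main(String[] args) {
        long[] sizes = { 10_347, 12_506, 6_027 };   // the three HFiles flushed above
        System.out.println("size gate: " + shouldSplit(sizes, 16 * 1024));   // true
        // row0002 is the first key seen in this log; the midkey and last row
        // values here are placeholders chosen to reproduce the rejected case.
        System.out.println("split point gate: "
            + hasUsableSplitPoint("row0002", "row0002", "rowNNNN"));         // false
    }
}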
2024-11-12T18:30:29,444 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T18:30:29,446 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T18:30:29,446 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.HStore(1541): 9450309108ebec67e3b1a70fb902d6f1/info is initiating minor compaction (all files) 2024-11-12T18:30:29,446 INFO [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9450309108ebec67e3b1a70fb902d6f1/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. 2024-11-12T18:30:29,446 INFO [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/18db97398b1c4e568cc39a95d4e04b66, hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/e3626b6a7b58487b9e249c1f86d904b2, hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/f22ebfff33844072aeeb78a1d5485aac] into tmpdir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp, totalSize=28.2 K 2024-11-12T18:30:29,447 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] compactions.Compactor(225): Compacting 18db97398b1c4e568cc39a95d4e04b66, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731436222926 2024-11-12T18:30:29,447 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] compactions.Compactor(225): Compacting e3626b6a7b58487b9e249c1f86d904b2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731436226950 2024-11-12T18:30:29,448 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] compactions.Compactor(225): Compacting f22ebfff33844072aeeb78a1d5485aac, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731436228996 2024-11-12T18:30:29,464 INFO [RS:0;9911683f163c:33915-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9450309108ebec67e3b1a70fb902d6f1#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T18:30:29,465 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/bd0473a29b0a4fc29bc3b80ad51e67dc is 1080, key is row0002/info:/1731436222926/Put/seqid=0 2024-11-12T18:30:29,467 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:29,467 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK], DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]) is bad. 2024-11-12T18:30:29,468 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741868_1051 2024-11-12T18:30:29,468 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK] 2024-11-12T18:30:29,470 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:30:29,470 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK], DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]) is bad. 2024-11-12T18:30:29,470 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741869_1052 2024-11-12T18:30:29,471 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK] 2024-11-12T18:30:29,473 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:29,473 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK], DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]) is bad. 2024-11-12T18:30:29,473 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741870_1053 2024-11-12T18:30:29,474 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK] 2024-11-12T18:30:29,475 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:29,475 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK], DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]) is bad. 2024-11-12T18:30:29,475 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741871_1054 2024-11-12T18:30:29,476 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK] 2024-11-12T18:30:29,477 WARN [IPC Server handler 3 on default port 37157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T18:30:29,477 WARN [IPC Server handler 3 on default port 37157 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T18:30:29,477 WARN [IPC Server handler 3 on default port 37157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T18:30:29,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741872_1055 (size=17994) 2024-11-12T18:30:29,896 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/bd0473a29b0a4fc29bc3b80ad51e67dc as hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/bd0473a29b0a4fc29bc3b80ad51e67dc 2024-11-12T18:30:29,912 INFO [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9450309108ebec67e3b1a70fb902d6f1/info of 9450309108ebec67e3b1a70fb902d6f1 into bd0473a29b0a4fc29bc3b80ad51e67dc(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T18:30:29,912 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9450309108ebec67e3b1a70fb902d6f1: 2024-11-12T18:30:29,912 INFO [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1., storeName=9450309108ebec67e3b1a70fb902d6f1/info, priority=13, startTime=1731436229444; duration=0sec 2024-11-12T18:30:29,912 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-12T18:30:29,912 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:30:29,912 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/bd0473a29b0a4fc29bc3b80ad51e67dc because midkey is the same as first or last row 2024-11-12T18:30:29,912 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-12T18:30:29,912 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:30:29,913 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/bd0473a29b0a4fc29bc3b80ad51e67dc because midkey is the same as first or last row 2024-11-12T18:30:29,913 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-12T18:30:29,913 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:30:29,913 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/bd0473a29b0a4fc29bc3b80ad51e67dc because midkey is the same as first or last row 2024-11-12T18:30:29,913 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:30:29,913 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9450309108ebec67e3b1a70fb902d6f1:info 2024-11-12T18:30:30,201 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:30,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33915 {}] regionserver.HRegion(8855): Flush requested on 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:30,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9450309108ebec67e3b1a70fb902d6f1 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-12T18:30:30,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/141d69818536496795afe4bbc8de174b is 1079, key is tmprow/info:/1731436230417/Put/seqid=0 2024-11-12T18:30:30,426 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:30,426 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK], DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]) is bad. 2024-11-12T18:30:30,426 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741873_1056 2024-11-12T18:30:30,427 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK] 2024-11-12T18:30:30,428 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:30,428 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK], DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]) is bad. 2024-11-12T18:30:30,428 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741874_1057 2024-11-12T18:30:30,429 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32921,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK] 2024-11-12T18:30:30,430 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:30,430 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK], DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]) is bad. 2024-11-12T18:30:30,430 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741875_1058 2024-11-12T18:30:30,431 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK] 2024-11-12T18:30:30,432 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:30,432 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK], DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]) is bad. 2024-11-12T18:30:30,432 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741876_1059 2024-11-12T18:30:30,433 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK] 2024-11-12T18:30:30,434 WARN [IPC Server handler 4 on default port 37157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T18:30:30,434 WARN [IPC Server handler 4 on default port 37157 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T18:30:30,434 WARN [IPC Server handler 4 on default port 37157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T18:30:30,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741877_1060 (size=6027) 2024-11-12T18:30:30,555 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3772ca2d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34767, datanodeUuid=e73c677b-95b6-4384-996f-29dbbd9a76dd, infoPort=38133, infoSecurePort=0, ipcPort=36133, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400):Failed to transfer BP-1467028132-172.17.0.3-1731436208400:blk_1073741842_1025 to 127.0.0.1:41099 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:30,555 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@39b532b3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34767, datanodeUuid=e73c677b-95b6-4384-996f-29dbbd9a76dd, infoPort=38133, infoSecurePort=0, ipcPort=36133, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400):Failed to transfer BP-1467028132-172.17.0.3-1731436208400:blk_1073741852_1035 to 127.0.0.1:32921 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T18:30:30,838 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/141d69818536496795afe4bbc8de174b 2024-11-12T18:30:30,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/141d69818536496795afe4bbc8de174b as hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/141d69818536496795afe4bbc8de174b 2024-11-12T18:30:30,854 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/141d69818536496795afe4bbc8de174b, entries=1, sequenceid=45, filesize=5.9 K 2024-11-12T18:30:30,856 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 9450309108ebec67e3b1a70fb902d6f1 in 437ms, sequenceid=45, compaction requested=false 2024-11-12T18:30:30,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9450309108ebec67e3b1a70fb902d6f1: 2024-11-12T18:30:30,856 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-12T18:30:30,856 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:30:30,856 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/bd0473a29b0a4fc29bc3b80ad51e67dc because midkey is the same as first or last row 2024-11-12T18:30:31,011 WARN [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-12T18:30:31,012 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:31,049 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:31,053 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:30:31,056 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:30:31,056 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:30:31,056 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T18:30:31,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7750b966{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:30:31,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@545fbf3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:30:31,206 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5fe4165a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/java.io.tmpdir/jetty-localhost-39379-hadoop-hdfs-3_4_1-tests_jar-_-any-7523347151163922742/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:31,206 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@23ddd153{HTTP/1.1, (http/1.1)}{localhost:39379} 2024-11-12T18:30:31,207 INFO [Time-limited test {}] server.Server(415): Started @126968ms 2024-11-12T18:30:31,211 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:30:31,265 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:31,377 WARN [Thread-972 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T18:30:31,393 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd517cc6c6115463 with lease ID 0x9f8f2543b331237b: from storage DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6 node DatanodeRegistration(127.0.0.1:37817, datanodeUuid=f8f02f19-7c5d-473e-b688-c183935fab36, infoPort=42237, infoSecurePort=0, ipcPort=42763, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-12T18:30:31,394 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd517cc6c6115463 with lease ID 0x9f8f2543b331237b: from storage DS-2fd37f29-e762-4d9e-bd6b-ea080d970bfd node DatanodeRegistration(127.0.0.1:37817, datanodeUuid=f8f02f19-7c5d-473e-b688-c183935fab36, infoPort=42237, infoSecurePort=0, ipcPort=42763, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:31,556 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@39b532b3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34767, datanodeUuid=e73c677b-95b6-4384-996f-29dbbd9a76dd, infoPort=38133, infoSecurePort=0, ipcPort=36133, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400):Failed to transfer BP-1467028132-172.17.0.3-1731436208400:blk_1073741857_1040 to 127.0.0.1:41099 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:31,556 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3772ca2d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34767, datanodeUuid=e73c677b-95b6-4384-996f-29dbbd9a76dd, infoPort=38133, infoSecurePort=0, ipcPort=36133, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400):Failed to transfer BP-1467028132-172.17.0.3-1731436208400:blk_1073741847_1030 to 127.0.0.1:37263 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:32,201 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:33,012 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:33,266 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:33,556 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@39b532b3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34767, datanodeUuid=e73c677b-95b6-4384-996f-29dbbd9a76dd, infoPort=38133, infoSecurePort=0, ipcPort=36133, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400):Failed to transfer BP-1467028132-172.17.0.3-1731436208400:blk_1073741872_1055 to 127.0.0.1:41099 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:33,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741867_1050 (size=6027) 2024-11-12T18:30:34,202 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:34,555 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3772ca2d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34767, datanodeUuid=e73c677b-95b6-4384-996f-29dbbd9a76dd, infoPort=38133, infoSecurePort=0, ipcPort=36133, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400):Failed to transfer BP-1467028132-172.17.0.3-1731436208400:blk_1073741877_1060 to 127.0.0.1:41099 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:35,012 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:35,266 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:36,202 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:37,013 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:37,267 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:30:38,202 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:39,013 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:39,092 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-12T18:30:39,267 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:39,387 ERROR [FSHLog-0-hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData-prefix:9911683f163c,41511,1731436209112 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:39,387 WARN [FSHLog-0-hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData-prefix:9911683f163c,41511,1731436209112 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:39,387 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 9911683f163c%2C41511%2C1731436209112:(num 1731436209291) roll requested 2024-11-12T18:30:39,388 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C41511%2C1731436209112.1731436239388 2024-11-12T18:30:39,395 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:39,395 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:39,395 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:39,395 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:39,395 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:39,395 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/WALs/9911683f163c,41511,1731436209112/9911683f163c%2C41511%2C1731436209112.1731436209291 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/WALs/9911683f163c,41511,1731436209112/9911683f163c%2C41511%2C1731436209112.1731436239388 2024-11-12T18:30:39,396 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:39,396 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:39,396 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/WALs/9911683f163c,41511,1731436209112/9911683f163c%2C41511%2C1731436209112.1731436209291 2024-11-12T18:30:39,397 WARN [IPC Server handler 4 on default port 37157 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/WALs/9911683f163c,41511,1731436209112/9911683f163c%2C41511%2C1731436209112.1731436209291 has not been closed. Lease recovery is in progress. RecoveryId = 1062 for block blk_1073741830_1006 2024-11-12T18:30:39,397 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/WALs/9911683f163c,41511,1731436209112/9911683f163c%2C41511%2C1731436209112.1731436209291 after 1ms 2024-11-12T18:30:39,404 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38133:38133),(127.0.0.1/127.0.0.1:42237:42237)] 2024-11-12T18:30:39,404 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/WALs/9911683f163c,41511,1731436209112/9911683f163c%2C41511%2C1731436209112.1731436209291 is not closed yet, will try archiving it next time 2024-11-12T18:30:40,203 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:41,014 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:41,418 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@ee6f638 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1467028132-172.17.0.3-1731436208400:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:41997,null,null]) java.net.ConnectException: Call From 9911683f163c/172.17.0.3 to localhost:38581 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-12T18:30:41,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741833_1019 (size=455) 2024-11-12T18:30:41,943 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436209593 to hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/oldWALs/9911683f163c%2C33915%2C1731436209168.1731436209593 2024-11-12T18:30:41,944 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436226944 to hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/oldWALs/9911683f163c%2C33915%2C1731436209168.1731436226944 2024-11-12T18:30:42,203 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:42,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741833_1019 (size=455) 2024-11-12T18:30:43,014 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:30:43,398 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/WALs/9911683f163c,41511,1731436209112/9911683f163c%2C41511%2C1731436209112.1731436209291 after 4002ms 2024-11-12T18:30:44,204 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:45,014 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:46,204 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:46,890 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C33915%2C1731436209168.1731436246889 2024-11-12T18:30:46,892 WARN [Thread-1004 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:46,893 WARN [Thread-1004 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741879_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK], DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]) is bad. 2024-11-12T18:30:46,893 WARN [Thread-1004 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741879_1063 2024-11-12T18:30:46,893 WARN [Thread-1004 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK] 2024-11-12T18:30:46,896 WARN [Thread-1004 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37263 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:46,896 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-736926770_22 at /127.0.0.1:40870 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741880_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data4]'}, localName='127.0.0.1:37817', datanodeUuid='f8f02f19-7c5d-473e-b688-c183935fab36', xmitsInProgress=0}:Exception transferring block BP-1467028132-172.17.0.3-1731436208400:blk_1073741880_1064 to mirror 127.0.0.1:37263 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:46,896 WARN [Thread-1004 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741880_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37817,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK], DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]) is bad. 2024-11-12T18:30:46,896 WARN [Thread-1004 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741880_1064 2024-11-12T18:30:46,896 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-736926770_22 at /127.0.0.1:40870 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741880_1064] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-12T18:30:46,896 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-736926770_22 at /127.0.0.1:40870 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741880_1064] {}] datanode.DataXceiver(331): 127.0.0.1:37817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40870 dst: /127.0.0.1:37817 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:46,897 WARN [Thread-1004 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK] 2024-11-12T18:30:46,899 WARN [Thread-1004 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41099 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:46,899 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-736926770_22 at /127.0.0.1:40882 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741881_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data4]'}, localName='127.0.0.1:37817', datanodeUuid='f8f02f19-7c5d-473e-b688-c183935fab36', xmitsInProgress=0}:Exception transferring block BP-1467028132-172.17.0.3-1731436208400:blk_1073741881_1065 to mirror 127.0.0.1:41099 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:46,899 WARN [Thread-1004 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741881_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37817,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]) is bad. 2024-11-12T18:30:46,899 WARN [Thread-1004 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741881_1065 2024-11-12T18:30:46,899 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-736926770_22 at /127.0.0.1:40882 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741881_1065] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-12T18:30:46,899 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-736926770_22 at /127.0.0.1:40882 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741881_1065] {}] datanode.DataXceiver(331): 127.0.0.1:37817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40882 dst: /127.0.0.1:37817 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:46,900 WARN [Thread-1004 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK] 2024-11-12T18:30:46,905 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:46,905 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:46,905 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:46,906 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:46,906 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:46,906 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436228973 with entries=13, filesize=11.81 KB; new WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436246889 2024-11-12T18:30:46,907 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42237:42237),(127.0.0.1/127.0.0.1:38133:38133)] 2024-11-12T18:30:46,907 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436228973 is not closed yet, will try archiving it next time 2024-11-12T18:30:46,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741862_1045 (size=12100) 2024-11-12T18:30:46,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33915 {}] regionserver.HRegion(8855): Flush requested on 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:46,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9450309108ebec67e3b1a70fb902d6f1 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-12T18:30:46,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/f8890d9d63d548d2bf1fd9dcec739fbc is 1080, key is row0013/info:/1731436246908/Put/seqid=0 2024-11-12T18:30:46,927 WARN [Thread-1012 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 
127.0.0.1:41099 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:46,927 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:40908 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741883_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data4]'}, localName='127.0.0.1:37817', datanodeUuid='f8f02f19-7c5d-473e-b688-c183935fab36', xmitsInProgress=0}:Exception transferring block BP-1467028132-172.17.0.3-1731436208400:blk_1073741883_1067 to mirror 127.0.0.1:41099 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:46,927 WARN [Thread-1012 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741883_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37817,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]) is bad. 2024-11-12T18:30:46,927 WARN [Thread-1012 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741883_1067 2024-11-12T18:30:46,927 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:40908 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741883_1067] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-12T18:30:46,927 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:40908 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741883_1067] {}] datanode.DataXceiver(331): 127.0.0.1:37817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40908 dst: /127.0.0.1:37817 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:46,928 WARN [Thread-1012 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK] 2024-11-12T18:30:46,931 WARN [Thread-1012 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37263 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:46,931 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:49628 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741884_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data6]'}, localName='127.0.0.1:34767', datanodeUuid='e73c677b-95b6-4384-996f-29dbbd9a76dd', xmitsInProgress=0}:Exception transferring block BP-1467028132-172.17.0.3-1731436208400:blk_1073741884_1068 to mirror 127.0.0.1:37263 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:46,931 WARN [Thread-1012 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741884_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK], DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]) is bad. 2024-11-12T18:30:46,931 WARN [Thread-1012 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741884_1068 2024-11-12T18:30:46,931 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:49628 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741884_1068] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-12T18:30:46,931 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:49628 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741884_1068] {}] datanode.DataXceiver(331): 127.0.0.1:34767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49628 dst: /127.0.0.1:34767 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T18:30:46,932 WARN [Thread-1012 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK] 2024-11-12T18:30:46,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741885_1069 (size=11421) 2024-11-12T18:30:46,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741885_1069 (size=11421) 2024-11-12T18:30:46,938 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/f8890d9d63d548d2bf1fd9dcec739fbc 2024-11-12T18:30:46,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/f8890d9d63d548d2bf1fd9dcec739fbc as hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/f8890d9d63d548d2bf1fd9dcec739fbc 2024-11-12T18:30:46,951 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/f8890d9d63d548d2bf1fd9dcec739fbc, entries=6, sequenceid=55, filesize=11.2 K 2024-11-12T18:30:46,953 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 9450309108ebec67e3b1a70fb902d6f1 in 35ms, sequenceid=55, compaction requested=true 2024-11-12T18:30:46,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9450309108ebec67e3b1a70fb902d6f1: 2024-11-12T18:30:46,953 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-12T18:30:46,953 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:30:46,953 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/bd0473a29b0a4fc29bc3b80ad51e67dc because midkey is the same as first or last row 2024-11-12T18:30:46,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9450309108ebec67e3b1a70fb902d6f1:info, priority=-2147483648, current under compaction store size is 1 2024-11-12T18:30:46,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:30:46,954 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T18:30:46,955 DEBUG 
[RS:0;9911683f163c:33915-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T18:30:46,955 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.HStore(1541): 9450309108ebec67e3b1a70fb902d6f1/info is initiating minor compaction (all files) 2024-11-12T18:30:46,955 INFO [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9450309108ebec67e3b1a70fb902d6f1/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. 2024-11-12T18:30:46,955 INFO [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/bd0473a29b0a4fc29bc3b80ad51e67dc, hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/141d69818536496795afe4bbc8de174b, hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/f8890d9d63d548d2bf1fd9dcec739fbc] into tmpdir=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp, totalSize=34.6 K 2024-11-12T18:30:46,956 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] compactions.Compactor(225): Compacting bd0473a29b0a4fc29bc3b80ad51e67dc, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731436222926 2024-11-12T18:30:46,956 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] compactions.Compactor(225): Compacting 141d69818536496795afe4bbc8de174b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731436230417 2024-11-12T18:30:46,956 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] compactions.Compactor(225): Compacting f8890d9d63d548d2bf1fd9dcec739fbc, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731436230824 2024-11-12T18:30:46,973 INFO [RS:0;9911683f163c:33915-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9450309108ebec67e3b1a70fb902d6f1#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T18:30:46,974 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/d09311bc0519420cbebe19c4c6bc6ff1 is 1080, key is row0002/info:/1731436222926/Put/seqid=0 2024-11-12T18:30:46,976 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41099 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:46,976 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:49674 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741886_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data6]'}, localName='127.0.0.1:34767', datanodeUuid='e73c677b-95b6-4384-996f-29dbbd9a76dd', xmitsInProgress=0}:Exception transferring block BP-1467028132-172.17.0.3-1731436208400:blk_1073741886_1070 to mirror 127.0.0.1:41099 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:46,977 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741886_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]) is bad. 2024-11-12T18:30:46,977 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:49674 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741886_1070] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-12T18:30:46,977 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741886_1070 2024-11-12T18:30:46,977 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:49674 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741886_1070] {}] datanode.DataXceiver(331): 127.0.0.1:34767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49674 dst: /127.0.0.1:34767 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:46,977 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK] 2024-11-12T18:30:46,980 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37263 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:46,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:40930 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741887_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data4]'}, localName='127.0.0.1:37817', datanodeUuid='f8f02f19-7c5d-473e-b688-c183935fab36', xmitsInProgress=0}:Exception transferring block BP-1467028132-172.17.0.3-1731436208400:blk_1073741887_1071 to mirror 127.0.0.1:37263 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:46,980 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741887_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37817,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK], DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]) is bad. 2024-11-12T18:30:46,980 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741887_1071 2024-11-12T18:30:46,980 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:40930 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741887_1071] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-12T18:30:46,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:40930 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741887_1071] {}] datanode.DataXceiver(331): 127.0.0.1:37817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40930 dst: /127.0.0.1:37817 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T18:30:46,980 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK] 2024-11-12T18:30:46,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741888_1072 (size=23502) 2024-11-12T18:30:46,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741888_1072 (size=23502) 2024-11-12T18:30:46,992 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/d09311bc0519420cbebe19c4c6bc6ff1 as hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/d09311bc0519420cbebe19c4c6bc6ff1 2024-11-12T18:30:47,000 INFO [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9450309108ebec67e3b1a70fb902d6f1/info of 9450309108ebec67e3b1a70fb902d6f1 into d09311bc0519420cbebe19c4c6bc6ff1(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T18:30:47,000 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9450309108ebec67e3b1a70fb902d6f1: 2024-11-12T18:30:47,000 INFO [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1., storeName=9450309108ebec67e3b1a70fb902d6f1/info, priority=13, startTime=1731436246953; duration=0sec 2024-11-12T18:30:47,000 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-12T18:30:47,000 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:30:47,000 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/d09311bc0519420cbebe19c4c6bc6ff1 because midkey is the same as first or last row 2024-11-12T18:30:47,000 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-12T18:30:47,000 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:30:47,000 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/d09311bc0519420cbebe19c4c6bc6ff1 because midkey is the same as first or last row 2024-11-12T18:30:47,000 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] 
regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-12T18:30:47,000 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:30:47,000 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/d09311bc0519420cbebe19c4c6bc6ff1 because midkey is the same as first or last row 2024-11-12T18:30:47,000 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:30:47,000 DEBUG [RS:0;9911683f163c:33915-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9450309108ebec67e3b1a70fb902d6f1:info 2024-11-12T18:30:47,015 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-12T18:30:47,015 INFO [regionserver/9911683f163c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:47,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-12T18:30:47,133 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-12T18:30:47,133 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:30:47,133 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:47,134 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:47,134 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-12T18:30:47,134 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-12T18:30:47,134 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1638000960, stopped=false 2024-11-12T18:30:47,134 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=9911683f163c,41511,1731436209112 2024-11-12T18:30:47,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:30:47,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:30:47,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44807-0x1003541ee640002, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:30:47,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:47,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:47,136 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:30:47,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44807-0x1003541ee640002, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:47,136 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-12T18:30:47,136 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:30:47,136 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:47,137 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '9911683f163c,33915,1731436209168' ***** 2024-11-12T18:30:47,137 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T18:30:47,137 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9911683f163c,44807,1731436210143' ***** 2024-11-12T18:30:47,137 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T18:30:47,137 INFO [RS:0;9911683f163c:33915 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T18:30:47,137 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:30:47,137 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44807-0x1003541ee640002, quorum=127.0.0.1:50248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:30:47,137 INFO [RS:0;9911683f163c:33915 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T18:30:47,137 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T18:30:47,137 INFO [RS:0;9911683f163c:33915 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T18:30:47,137 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:30:47,137 INFO [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer(3091): Received CLOSE for 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:47,138 INFO [RS:1;9911683f163c:44807 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T18:30:47,138 INFO [RS:1;9911683f163c:44807 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T18:30:47,138 INFO [RS:1;9911683f163c:44807 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T18:30:47,138 INFO [RS:1;9911683f163c:44807 {}] regionserver.HRegionServer(959): stopping server 9911683f163c,44807,1731436210143 2024-11-12T18:30:47,138 INFO [RS:1;9911683f163c:44807 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:30:47,138 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T18:30:47,138 INFO [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer(959): stopping server 9911683f163c,33915,1731436209168 2024-11-12T18:30:47,138 INFO [RS:1;9911683f163c:44807 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;9911683f163c:44807. 
2024-11-12T18:30:47,138 INFO [RS:0;9911683f163c:33915 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:30:47,138 DEBUG [RS:1;9911683f163c:44807 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:30:47,138 INFO [RS:0;9911683f163c:33915 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;9911683f163c:33915. 2024-11-12T18:30:47,138 DEBUG [RS:1;9911683f163c:44807 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:47,138 DEBUG [RS:0;9911683f163c:33915 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:30:47,138 DEBUG [RS:0;9911683f163c:33915 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:47,138 INFO [RS:1;9911683f163c:44807 {}] regionserver.HRegionServer(976): stopping server 9911683f163c,44807,1731436210143; all regions closed. 
2024-11-12T18:30:47,138 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9450309108ebec67e3b1a70fb902d6f1, disabling compactions & flushes 2024-11-12T18:30:47,138 INFO [RS:0;9911683f163c:33915 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T18:30:47,138 INFO [RS:0;9911683f163c:33915 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T18:30:47,138 INFO [RS:0;9911683f163c:33915 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T18:30:47,138 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. 2024-11-12T18:30:47,138 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. 2024-11-12T18:30:47,139 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. after waiting 1 ms 2024-11-12T18:30:47,139 INFO [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-12T18:30:47,139 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. 2024-11-12T18:30:47,139 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 9450309108ebec67e3b1a70fb902d6f1 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-11-12T18:30:47,139 INFO [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-12T18:30:47,139 DEBUG [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer(1325): Online Regions={9450309108ebec67e3b1a70fb902d6f1=TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1., 1588230740=hbase:meta,,1.1588230740} 2024-11-12T18:30:47,139 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:47,139 DEBUG [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9450309108ebec67e3b1a70fb902d6f1 2024-11-12T18:30:47,139 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:30:47,139 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:47,139 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:30:47,139 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:30:47,139 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:47,139 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 
2024-11-12T18:30:47,139 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:30:47,139 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:47,139 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:47,139 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-12T18:30:47,140 ERROR [FSHLog-0-hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1-prefix:9911683f163c,33915,1731436209168.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:47,140 WARN [FSHLog-0-hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1-prefix:9911683f163c,33915,1731436209168.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:47,140 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:30:47,140 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 9911683f163c%2C33915%2C1731436209168.meta:.meta(num 1731436210021) roll requested 2024-11-12T18:30:47,140 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:47,140 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 2024-11-12T18:30:47,140 INFO [regionserver/9911683f163c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C33915%2C1731436209168.meta.1731436247140.meta 2024-11-12T18:30:47,141 WARN [IPC Server handler 0 on default port 37157 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 has not been closed. Lease recovery is in progress. RecoveryId = 1073 for block blk_1073741837_1013 2024-11-12T18:30:47,141 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 after 1ms 2024-11-12T18:30:47,144 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/ec455193fe0f45849405bef6cead0b9b is 1080, key is row0018/info:/1731436246919/Put/seqid=0 2024-11-12T18:30:47,144 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1074 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41099 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:30:47,144 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:40942 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741889_1074] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data4]'}, localName='127.0.0.1:37817', datanodeUuid='f8f02f19-7c5d-473e-b688-c183935fab36', xmitsInProgress=0}:Exception transferring block BP-1467028132-172.17.0.3-1731436208400:blk_1073741889_1074 to mirror 127.0.0.1:41099 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:47,144 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741889_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37817,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]) is bad. 2024-11-12T18:30:47,145 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741889_1074 2024-11-12T18:30:47,145 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:40942 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741889_1074] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-12T18:30:47,145 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:40942 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741889_1074] {}] datanode.DataXceiver(331): 127.0.0.1:37817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40942 dst: /127.0.0.1:37817 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:47,145 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK] 2024-11-12T18:30:47,146 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:47,146 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741890_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK], DatanodeInfoWithStorage[127.0.0.1:37817,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]) is bad. 2024-11-12T18:30:47,146 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741890_1075 2024-11-12T18:30:47,147 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK] 2024-11-12T18:30:47,147 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:49698 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741891_1076] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data6]'}, localName='127.0.0.1:34767', datanodeUuid='e73c677b-95b6-4384-996f-29dbbd9a76dd', xmitsInProgress=0}:Exception transferring block BP-1467028132-172.17.0.3-1731436208400:blk_1073741891_1076 to mirror 127.0.0.1:37263 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:47,147 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1076 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37263 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:47,148 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:49698 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741891_1076] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-12T18:30:47,148 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741891_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK], DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]) is bad. 2024-11-12T18:30:47,148 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741891_1076 2024-11-12T18:30:47,148 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:49698 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741891_1076] {}] datanode.DataXceiver(331): 127.0.0.1:34767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49698 dst: /127.0.0.1:34767 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:47,148 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:47,148 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741892_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK], DatanodeInfoWithStorage[127.0.0.1:34767,DS-4b1a099b-c591-4a37-9137-23876cb6b31c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]) is bad. 2024-11-12T18:30:47,148 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741892_1077 2024-11-12T18:30:47,148 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK] 2024-11-12T18:30:47,148 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK] 2024-11-12T18:30:47,149 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1078 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:30:47,149 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741893_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK], DatanodeInfoWithStorage[127.0.0.1:37817,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]) is bad. 2024-11-12T18:30:47,149 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741893_1078 2024-11-12T18:30:47,150 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK] 2024-11-12T18:30:47,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741894_1079 (size=11421) 2024-11-12T18:30:47,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741894_1079 (size=11421) 2024-11-12T18:30:47,154 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:47,154 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:47,154 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:47,154 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:47,154 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:47,154 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436247140.meta 2024-11-12T18:30:47,155 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:47,155 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:47,155 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta 2024-11-12T18:30:47,155 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38133:38133),(127.0.0.1/127.0.0.1:42237:42237)] 2024-11-12T18:30:47,155 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta is not closed yet, will try archiving it next time 2024-11-12T18:30:47,155 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/ec455193fe0f45849405bef6cead0b9b 2024-11-12T18:30:47,155 WARN [IPC Server handler 2 on default port 37157 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1081 for block blk_1073741834_1010 2024-11-12T18:30:47,156 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta after 1ms 2024-11-12T18:30:47,163 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/.tmp/info/ec455193fe0f45849405bef6cead0b9b as hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/ec455193fe0f45849405bef6cead0b9b 2024-11-12T18:30:47,169 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/ec455193fe0f45849405bef6cead0b9b, entries=6, sequenceid=65, filesize=11.2 K 2024-11-12T18:30:47,171 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 9450309108ebec67e3b1a70fb902d6f1 in 32ms, sequenceid=65, compaction requested=false 2024-11-12T18:30:47,172 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/18db97398b1c4e568cc39a95d4e04b66, hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/e3626b6a7b58487b9e249c1f86d904b2, hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/bd0473a29b0a4fc29bc3b80ad51e67dc, hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/f22ebfff33844072aeeb78a1d5485aac, hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/141d69818536496795afe4bbc8de174b, hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/f8890d9d63d548d2bf1fd9dcec739fbc] to archive 2024-11-12T18:30:47,173 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-12T18:30:47,173 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/.tmp/info/06d15d21b70d4f5ca15c29eb3e178e70 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1./info:regioninfo/1731436210605/Put/seqid=0 2024-11-12T18:30:47,175 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/18db97398b1c4e568cc39a95d4e04b66 to hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/18db97398b1c4e568cc39a95d4e04b66 2024-11-12T18:30:47,177 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/e3626b6a7b58487b9e249c1f86d904b2 to hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/e3626b6a7b58487b9e249c1f86d904b2 2024-11-12T18:30:47,178 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/bd0473a29b0a4fc29bc3b80ad51e67dc to hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/bd0473a29b0a4fc29bc3b80ad51e67dc 2024-11-12T18:30:47,180 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/f22ebfff33844072aeeb78a1d5485aac to hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/f22ebfff33844072aeeb78a1d5485aac 2024-11-12T18:30:47,181 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/141d69818536496795afe4bbc8de174b to 
hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/141d69818536496795afe4bbc8de174b 2024-11-12T18:30:47,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741896_1082 (size=7089) 2024-11-12T18:30:47,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741896_1082 (size=7089) 2024-11-12T18:30:47,183 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/f8890d9d63d548d2bf1fd9dcec739fbc to hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/info/f8890d9d63d548d2bf1fd9dcec739fbc 2024-11-12T18:30:47,183 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/.tmp/info/06d15d21b70d4f5ca15c29eb3e178e70 2024-11-12T18:30:47,183 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=9911683f163c:41511 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-12T18:30:47,184 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [18db97398b1c4e568cc39a95d4e04b66=10347, e3626b6a7b58487b9e249c1f86d904b2=12506, bd0473a29b0a4fc29bc3b80ad51e67dc=17994, f22ebfff33844072aeeb78a1d5485aac=6027, 141d69818536496795afe4bbc8de174b=6027, f8890d9d63d548d2bf1fd9dcec739fbc=11421] 2024-11-12T18:30:47,189 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9450309108ebec67e3b1a70fb902d6f1/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-11-12T18:30:47,190 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. 2024-11-12T18:30:47,190 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9450309108ebec67e3b1a70fb902d6f1: Waiting for close lock at 1731436247138Running coprocessor pre-close hooks at 1731436247138Disabling compacts and flushes for region at 1731436247138Disabling writes for close at 1731436247139 (+1 ms)Obtaining lock to block concurrent updates at 1731436247139Preparing flush snapshotting stores in 9450309108ebec67e3b1a70fb902d6f1 at 1731436247139Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1., syncing WAL and waiting on mvcc, flushsize=dataSize=6455, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1731436247139Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. at 1731436247140 (+1 ms)Flushing 9450309108ebec67e3b1a70fb902d6f1/info: creating writer at 1731436247140Flushing 9450309108ebec67e3b1a70fb902d6f1/info: appending metadata at 1731436247144 (+4 ms)Flushing 9450309108ebec67e3b1a70fb902d6f1/info: closing flushed file at 1731436247144Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30bdffa0: reopening flushed file at 1731436247162 (+18 ms)Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 9450309108ebec67e3b1a70fb902d6f1 in 32ms, sequenceid=65, compaction requested=false at 1731436247171 (+9 ms)Writing region close event to WAL at 1731436247184 (+13 ms)Running coprocessor post-close hooks at 1731436247190 (+6 ms)Closed at 1731436247190 2024-11-12T18:30:47,190 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731436210236.9450309108ebec67e3b1a70fb902d6f1. 
2024-11-12T18:30:47,206 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/.tmp/ns/c8288c66c1c64e01b0c6c08f546ab173 is 43, key is default/ns:d/1731436210071/Put/seqid=0 2024-11-12T18:30:47,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741897_1083 (size=5153) 2024-11-12T18:30:47,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741897_1083 (size=5153) 2024-11-12T18:30:47,212 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/.tmp/ns/c8288c66c1c64e01b0c6c08f546ab173 2024-11-12T18:30:47,233 INFO [regionserver/9911683f163c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-12T18:30:47,233 INFO [regionserver/9911683f163c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-12T18:30:47,234 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/.tmp/table/4b2bc37d25e04fa0b30bd5667f7c4a91 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731436210615/Put/seqid=0 2024-11-12T18:30:47,238 WARN [Thread-1059 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1084 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37263 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:30:47,237 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:40988 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741898_1084] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data4]'}, localName='127.0.0.1:37817', datanodeUuid='f8f02f19-7c5d-473e-b688-c183935fab36', xmitsInProgress=0}:Exception transferring block BP-1467028132-172.17.0.3-1731436208400:blk_1073741898_1084 to mirror 127.0.0.1:37263 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:47,238 WARN [Thread-1059 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741898_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37817,DS-e1ac9835-f7bb-40ad-bae5-893dfc5227f6,DISK], DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK]) is bad. 2024-11-12T18:30:47,238 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:40988 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741898_1084] {}] datanode.BlockReceiver(316): Block 1073741898 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-12T18:30:47,238 WARN [Thread-1059 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741898_1084 2024-11-12T18:30:47,238 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2146992604_22 at /127.0.0.1:40988 [Receiving block BP-1467028132-172.17.0.3-1731436208400:blk_1073741898_1084] {}] datanode.DataXceiver(331): 127.0.0.1:37817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40988 dst: /127.0.0.1:37817 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:47,239 WARN [Thread-1059 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37263,DS-e3794d09-5026-4dd9-8405-2324400e0891,DISK] 2024-11-12T18:30:47,240 WARN [Thread-1059 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:30:47,240 WARN [Thread-1059 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1467028132-172.17.0.3-1731436208400:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK], DatanodeInfoWithStorage[127.0.0.1:41099,DS-b65245f5-106f-4831-85b6-78ca3d6418b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK]) is bad. 
2024-11-12T18:30:47,240 WARN [Thread-1059 {}] hdfs.DataStreamer(1850): Abandoning BP-1467028132-172.17.0.3-1731436208400:blk_1073741899_1085 2024-11-12T18:30:47,241 WARN [Thread-1059 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41997,DS-ceac852d-d1df-4db1-8922-b9f0dd45b386,DISK] 2024-11-12T18:30:47,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741900_1086 (size=5424) 2024-11-12T18:30:47,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741900_1086 (size=5424) 2024-11-12T18:30:47,246 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/.tmp/table/4b2bc37d25e04fa0b30bd5667f7c4a91 2024-11-12T18:30:47,253 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/.tmp/info/06d15d21b70d4f5ca15c29eb3e178e70 as hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/info/06d15d21b70d4f5ca15c29eb3e178e70 2024-11-12T18:30:47,260 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/info/06d15d21b70d4f5ca15c29eb3e178e70, entries=10, sequenceid=11, filesize=6.9 K 2024-11-12T18:30:47,261 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/.tmp/ns/c8288c66c1c64e01b0c6c08f546ab173 as hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/ns/c8288c66c1c64e01b0c6c08f546ab173 2024-11-12T18:30:47,267 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/ns/c8288c66c1c64e01b0c6c08f546ab173, entries=2, sequenceid=11, filesize=5.0 K 2024-11-12T18:30:47,268 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/.tmp/table/4b2bc37d25e04fa0b30bd5667f7c4a91 as hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/table/4b2bc37d25e04fa0b30bd5667f7c4a91 2024-11-12T18:30:47,274 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/table/4b2bc37d25e04fa0b30bd5667f7c4a91, entries=2, sequenceid=11, filesize=5.3 K 2024-11-12T18:30:47,276 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false 2024-11-12T18:30:47,280 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-12T18:30:47,281 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T18:30:47,281 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:30:47,281 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436247139Running coprocessor pre-close hooks at 1731436247139Disabling compacts and flushes for region at 1731436247139Disabling writes for close at 1731436247139Obtaining lock to block concurrent updates at 1731436247139Preparing flush snapshotting stores in 1588230740 at 1731436247139Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731436247140 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731436247156 (+16 ms)Flushing 1588230740/info: creating writer at 1731436247156Flushing 1588230740/info: appending metadata at 1731436247172 (+16 ms)Flushing 1588230740/info: closing flushed file at 1731436247173 (+1 ms)Flushing 1588230740/ns: creating writer at 1731436247191 (+18 ms)Flushing 1588230740/ns: appending metadata at 1731436247205 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731436247205Flushing 1588230740/table: creating writer at 1731436247219 (+14 ms)Flushing 1588230740/table: appending metadata at 1731436247234 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731436247234Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16c35947: reopening flushed file at 1731436247252 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3cdb21c2: reopening flushed file at 1731436247260 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a509e4a: reopening flushed file at 1731436247268 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false at 1731436247276 (+8 ms)Writing region close event to WAL at 1731436247277 (+1 ms)Running coprocessor post-close hooks at 1731436247281 (+4 ms)Closed at 1731436247281 2024-11-12T18:30:47,281 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-12T18:30:47,309 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.1731436228973 to hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/oldWALs/9911683f163c%2C33915%2C1731436209168.1731436228973 2024-11-12T18:30:47,339 INFO [RS:0;9911683f163c:33915 {}] 
regionserver.HRegionServer(976): stopping server 9911683f163c,33915,1731436209168; all regions closed. 2024-11-12T18:30:47,340 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:47,340 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:47,340 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:47,340 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:47,340 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:47,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741895_1080 (size=825) 2024-11-12T18:30:47,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741895_1080 (size=825) 2024-11-12T18:30:47,449 INFO [regionserver/9911683f163c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-12T18:30:47,449 INFO [regionserver/9911683f163c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-12T18:30:47,454 INFO [regionserver/9911683f163c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:30:48,202 INFO [regionserver/9911683f163c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:30:48,555 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3772ca2d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34767, datanodeUuid=e73c677b-95b6-4384-996f-29dbbd9a76dd, infoPort=38133, infoSecurePort=0, ipcPort=36133, storageInfo=lv=-57;cid=testClusterID;nsid=1524433986;c=1731436208400):Failed to transfer BP-1467028132-172.17.0.3-1731436208400:blk_1073741862_1045 to 127.0.0.1:37263 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:49,585 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-12T18:30:49,586 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T18:30:49,586 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-12T18:30:50,422 INFO [master/9911683f163c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 
2024-11-12T18:30:50,422 INFO [master/9911683f163c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-12T18:30:51,142 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 after 4002ms 2024-11-12T18:30:51,157 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta after 4002ms 2024-11-12T18:30:51,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741836_1012 (size=76) 2024-11-12T18:30:51,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:30:51,423 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2f09a32e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1467028132-172.17.0.3-1731436208400:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:41997,null,null]) java.net.ConnectException: Call From 9911683f163c/172.17.0.3 to localhost:38581 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-12T18:30:52,140 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-12T18:30:52,143 DEBUG [RS:1;9911683f163c:44807 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/oldWALs 2024-11-12T18:30:52,143 INFO [RS:1;9911683f163c:44807 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C44807%2C1731436210143:(num 1731436210337) 2024-11-12T18:30:52,143 DEBUG [RS:1;9911683f163c:44807 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:52,143 INFO [RS:1;9911683f163c:44807 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:30:52,143 INFO [RS:1;9911683f163c:44807 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:30:52,143 INFO [RS:1;9911683f163c:44807 {}] hbase.ChoreService(370): Chore service for: regionserver/9911683f163c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T18:30:52,144 INFO [RS:1;9911683f163c:44807 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T18:30:52,144 INFO [RS:1;9911683f163c:44807 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T18:30:52,144 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T18:30:52,144 INFO [RS:1;9911683f163c:44807 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-12T18:30:52,144 INFO [RS:1;9911683f163c:44807 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:30:52,144 INFO [RS:1;9911683f163c:44807 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44807 2024-11-12T18:30:52,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44807-0x1003541ee640002, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9911683f163c,44807,1731436210143 2024-11-12T18:30:52,150 INFO [RS:1;9911683f163c:44807 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:30:52,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:30:52,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:30:52,152 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9911683f163c,44807,1731436210143] 2024-11-12T18:30:52,154 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9911683f163c,44807,1731436210143 already deleted, retry=false 2024-11-12T18:30:52,154 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9911683f163c,44807,1731436210143 expired; onlineServers=1 2024-11-12T18:30:52,193 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,215 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,215 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,225 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44807-0x1003541ee640002, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:30:52,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44807-0x1003541ee640002, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:30:52,253 INFO [RS:1;9911683f163c:44807 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:30:52,253 INFO [RS:1;9911683f163c:44807 {}] regionserver.HRegionServer(1031): Exiting; stopping=9911683f163c,44807,1731436210143; zookeeper connection closed. 2024-11-12T18:30:52,259 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2c946954 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2c946954 2024-11-12T18:30:52,341 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-12T18:30:52,346 DEBUG [RS:0;9911683f163c:33915 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/oldWALs 2024-11-12T18:30:52,346 INFO [RS:0;9911683f163c:33915 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C33915%2C1731436209168.meta:.meta(num 1731436247140) 2024-11-12T18:30:52,346 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:52,346 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:52,347 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:52,347 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:52,347 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:52,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741882_1066 (size=15140) 2024-11-12T18:30:52,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741882_1066 (size=15140) 2024-11-12T18:30:52,353 DEBUG [RS:0;9911683f163c:33915 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/oldWALs 2024-11-12T18:30:52,353 INFO [RS:0;9911683f163c:33915 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C33915%2C1731436209168:(num 1731436246889) 2024-11-12T18:30:52,353 DEBUG [RS:0;9911683f163c:33915 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:52,353 INFO [RS:0;9911683f163c:33915 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:30:52,353 INFO [RS:0;9911683f163c:33915 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:30:52,353 INFO 
[RS:0;9911683f163c:33915 {}] hbase.ChoreService(370): Chore service for: regionserver/9911683f163c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T18:30:52,354 INFO [RS:0;9911683f163c:33915 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:30:52,354 INFO [RS:0;9911683f163c:33915 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33915 2024-11-12T18:30:52,354 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T18:30:52,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9911683f163c,33915,1731436209168 2024-11-12T18:30:52,357 INFO [RS:0;9911683f163c:33915 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:30:52,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:30:52,359 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9911683f163c,33915,1731436209168] 2024-11-12T18:30:52,360 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9911683f163c,33915,1731436209168 already deleted, retry=false 2024-11-12T18:30:52,361 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9911683f163c,33915,1731436209168 expired; onlineServers=0 2024-11-12T18:30:52,361 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '9911683f163c,41511,1731436209112' ***** 2024-11-12T18:30:52,361 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T18:30:52,361 INFO [M:0;9911683f163c:41511 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:30:52,361 INFO [M:0;9911683f163c:41511 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:30:52,361 DEBUG [M:0;9911683f163c:41511 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T18:30:52,361 DEBUG [M:0;9911683f163c:41511 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T18:30:52,361 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-12T18:30:52,361 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436209389 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436209389,5,FailOnTimeoutGroup] 2024-11-12T18:30:52,361 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436209389 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436209389,5,FailOnTimeoutGroup] 2024-11-12T18:30:52,361 INFO [M:0;9911683f163c:41511 {}] hbase.ChoreService(370): Chore service for: master/9911683f163c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-12T18:30:52,361 INFO [M:0;9911683f163c:41511 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:30:52,362 DEBUG [M:0;9911683f163c:41511 {}] master.HMaster(1795): Stopping service threads 2024-11-12T18:30:52,362 INFO [M:0;9911683f163c:41511 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T18:30:52,362 INFO [M:0;9911683f163c:41511 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:30:52,362 INFO [M:0;9911683f163c:41511 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T18:30:52,362 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-12T18:30:52,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T18:30:52,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:52,364 DEBUG [M:0;9911683f163c:41511 {}] zookeeper.ZKUtil(347): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-12T18:30:52,364 WARN [M:0;9911683f163c:41511 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-12T18:30:52,365 INFO [M:0;9911683f163c:41511 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/.lastflushedseqids 2024-11-12T18:30:52,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741901_1087 (size=130) 2024-11-12T18:30:52,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741901_1087 (size=130) 2024-11-12T18:30:52,374 INFO [M:0;9911683f163c:41511 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-12T18:30:52,374 INFO [M:0;9911683f163c:41511 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-12T18:30:52,374 DEBUG [M:0;9911683f163c:41511 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:30:52,374 INFO [M:0;9911683f163c:41511 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:52,374 DEBUG [M:0;9911683f163c:41511 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:52,374 DEBUG [M:0;9911683f163c:41511 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:30:52,374 DEBUG [M:0;9911683f163c:41511 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:52,375 INFO [M:0;9911683f163c:41511 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB 2024-11-12T18:30:52,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:30:52,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:30:52,400 DEBUG [M:0;9911683f163c:41511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ab12c950955c4116b180fe4b10f3ab2a is 82, key is hbase:meta,,1/info:regioninfo/1731436210052/Put/seqid=0 2024-11-12T18:30:52,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741902_1088 (size=5672) 2024-11-12T18:30:52,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741902_1088 (size=5672) 2024-11-12T18:30:52,407 INFO [M:0;9911683f163c:41511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ab12c950955c4116b180fe4b10f3ab2a 2024-11-12T18:30:52,437 DEBUG [M:0;9911683f163c:41511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9631b87ca618448cb2f126b1450a3dcf is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731436210621/Put/seqid=0 2024-11-12T18:30:52,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741903_1089 (size=6254) 2024-11-12T18:30:52,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741903_1089 (size=6254) 2024-11-12T18:30:52,444 INFO [M:0;9911683f163c:41511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9631b87ca618448cb2f126b1450a3dcf 2024-11-12T18:30:52,451 INFO [M:0;9911683f163c:41511 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9631b87ca618448cb2f126b1450a3dcf 2024-11-12T18:30:52,459 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:30:52,459 INFO [RS:0;9911683f163c:33915 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:30:52,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33915-0x1003541ee640001, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:30:52,459 INFO [RS:0;9911683f163c:33915 {}] regionserver.HRegionServer(1031): Exiting; stopping=9911683f163c,33915,1731436209168; zookeeper connection closed. 2024-11-12T18:30:52,459 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@527f0f9f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@527f0f9f 2024-11-12T18:30:52,460 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-12T18:30:52,468 DEBUG [M:0;9911683f163c:41511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/73adcec14b784bf18eda6aeec2d54964 is 69, key is 9911683f163c,33915,1731436209168/rs:state/1731436209428/Put/seqid=0 2024-11-12T18:30:52,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741904_1090 (size=5224) 2024-11-12T18:30:52,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741904_1090 (size=5224) 2024-11-12T18:30:52,475 INFO [M:0;9911683f163c:41511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/73adcec14b784bf18eda6aeec2d54964 2024-11-12T18:30:52,497 DEBUG [M:0;9911683f163c:41511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/11432b67591a4225aff6caa12a567f07 is 52, key is load_balancer_on/state:d/1731436210125/Put/seqid=0 2024-11-12T18:30:52,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741905_1091 (size=5056) 2024-11-12T18:30:52,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741905_1091 (size=5056) 2024-11-12T18:30:52,504 INFO [M:0;9911683f163c:41511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/11432b67591a4225aff6caa12a567f07 2024-11-12T18:30:52,511 DEBUG [M:0;9911683f163c:41511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ab12c950955c4116b180fe4b10f3ab2a as 
hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ab12c950955c4116b180fe4b10f3ab2a 2024-11-12T18:30:52,517 INFO [M:0;9911683f163c:41511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ab12c950955c4116b180fe4b10f3ab2a, entries=8, sequenceid=60, filesize=5.5 K 2024-11-12T18:30:52,518 DEBUG [M:0;9911683f163c:41511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9631b87ca618448cb2f126b1450a3dcf as hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9631b87ca618448cb2f126b1450a3dcf 2024-11-12T18:30:52,524 INFO [M:0;9911683f163c:41511 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9631b87ca618448cb2f126b1450a3dcf 2024-11-12T18:30:52,524 INFO [M:0;9911683f163c:41511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9631b87ca618448cb2f126b1450a3dcf, entries=6, sequenceid=60, filesize=6.1 K 2024-11-12T18:30:52,525 DEBUG [M:0;9911683f163c:41511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/73adcec14b784bf18eda6aeec2d54964 as hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/73adcec14b784bf18eda6aeec2d54964 2024-11-12T18:30:52,531 INFO [M:0;9911683f163c:41511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/73adcec14b784bf18eda6aeec2d54964, entries=2, sequenceid=60, filesize=5.1 K 2024-11-12T18:30:52,532 DEBUG [M:0;9911683f163c:41511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/11432b67591a4225aff6caa12a567f07 as hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/11432b67591a4225aff6caa12a567f07 2024-11-12T18:30:52,538 INFO [M:0;9911683f163c:41511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/11432b67591a4225aff6caa12a567f07, entries=1, sequenceid=60, filesize=4.9 K 2024-11-12T18:30:52,539 INFO [M:0;9911683f163c:41511 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 165ms, sequenceid=60, compaction requested=false 2024-11-12T18:30:52,541 INFO [M:0;9911683f163c:41511 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-12T18:30:52,541 DEBUG [M:0;9911683f163c:41511 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436252374Disabling compacts and flushes for region at 1731436252374Disabling writes for close at 1731436252374Obtaining lock to block concurrent updates at 1731436252375 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731436252375Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1731436252375Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731436252376 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731436252376Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731436252399 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731436252399Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731436252415 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731436252436 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731436252436Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731436252451 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731436252468 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731436252468Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731436252481 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731436252497 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731436252497Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29ba95ce: reopening flushed file at 1731436252510 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29d0e2b8: reopening flushed file at 1731436252517 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41d7ab4c: reopening flushed file at 1731436252525 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d907a42: reopening flushed file at 1731436252532 (+7 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 165ms, sequenceid=60, compaction requested=false at 1731436252539 (+7 ms)Writing region close event to WAL at 1731436252541 (+2 ms)Closed at 1731436252541 2024-11-12T18:30:52,542 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:52,542 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:52,542 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:52,542 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:52,542 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:30:52,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37817 is added to blk_1073741878_1061 (size=1045) 2024-11-12T18:30:52,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34767 is added to blk_1073741878_1061 (size=1045) 2024-11-12T18:30:52,546 INFO [M:0;9911683f163c:41511 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-12T18:30:52,546 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T18:30:52,546 INFO [M:0;9911683f163c:41511 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41511 2024-11-12T18:30:52,546 INFO [M:0;9911683f163c:41511 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:30:52,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:30:52,648 INFO [M:0;9911683f163c:41511 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:30:52,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41511-0x1003541ee640000, quorum=127.0.0.1:50248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:30:52,651 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5fe4165a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:52,651 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@23ddd153{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:30:52,652 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:30:52,652 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@545fbf3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:30:52,652 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7750b966{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir/,STOPPED} 2024-11-12T18:30:52,653 WARN [BP-1467028132-172.17.0.3-1731436208400 heartbeating to localhost/127.0.0.1:37157 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:30:52,653 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-12T18:30:52,653 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@57a7728d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1467028132-172.17.0.3-1731436208400:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:41997,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:38581 , LocalHost:localPort 9911683f163c/172.17.0.3:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-12T18:30:52,653 WARN [BP-1467028132-172.17.0.3-1731436208400 heartbeating to localhost/127.0.0.1:37157 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1467028132-172.17.0.3-1731436208400 (Datanode Uuid f8f02f19-7c5d-473e-b688-c183935fab36) service to localhost/127.0.0.1:37157 2024-11-12T18:30:52,653 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:30:52,654 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data3/current/BP-1467028132-172.17.0.3-1731436208400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:52,655 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data4/current/BP-1467028132-172.17.0.3-1731436208400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:52,655 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@57a7728d {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1467028132-172.17.0.3-1731436208400:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:41997,null,null], DatanodeInfoWithStorage[127.0.0.1:37817,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1467028132-172.17.0.3-1731436208400 2024-11-12T18:30:52,655 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:30:52,655 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@57a7728d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1467028132-172.17.0.3-1731436208400:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:41997,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1467028132-172.17.0.3-1731436208400 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T18:30:52,655 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@57a7728d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1467028132-172.17.0.3-1731436208400:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:37817,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1467028132-172.17.0.3-1731436208400 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:30:52,655 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@57a7728d {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1467028132-172.17.0.3-1731436208400:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:41997,null,null], DatanodeInfoWithStorage[127.0.0.1:37817,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1467028132-172.17.0.3-1731436208400:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:41997,null,null], DatanodeInfoWithStorage[127.0.0.1:37817,null,null]] 2024-11-12T18:30:52,657 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@18da41c3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:52,657 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3158d3ef{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:30:52,658 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:30:52,658 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10f52e31{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:30:52,658 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68953e7c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir/,STOPPED} 2024-11-12T18:30:52,659 WARN [BP-1467028132-172.17.0.3-1731436208400 heartbeating to localhost/127.0.0.1:37157 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:30:52,659 WARN [BP-1467028132-172.17.0.3-1731436208400 heartbeating to localhost/127.0.0.1:37157 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1467028132-172.17.0.3-1731436208400 (Datanode Uuid e73c677b-95b6-4384-996f-29dbbd9a76dd) service to localhost/127.0.0.1:37157 2024-11-12T18:30:52,659 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T18:30:52,659 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:30:52,660 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data5/current/BP-1467028132-172.17.0.3-1731436208400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:52,661 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/cluster_4706a9e0-b081-0833-22fe-4d3a52344f21/data/data6/current/BP-1467028132-172.17.0.3-1731436208400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:30:52,661 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:30:52,667 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1d0180e0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:30:52,668 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38aa7c9e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:30:52,668 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:30:52,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@404c33{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:30:52,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19e00a63{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir/,STOPPED} 2024-11-12T18:30:52,676 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-12T18:30:52,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-12T18:30:52,713 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=153 (was 78) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37157 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40907 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37157 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37157 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:40907 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:37157 from jenkins.hfs.2 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37157 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:37157 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:37157 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:37157 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:37157 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f261cbefa48.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37157 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:37157 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f261cbefa48.run(Unknown Source) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=454 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=189 (was 154) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6131 (was 6575) 2024-11-12T18:30:52,720 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=153, OpenFileDescriptor=454, MaxFileDescriptor=1048576, SystemLoadAverage=189, ProcessCount=11, AvailableMemoryMB=6132 2024-11-12T18:30:52,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-12T18:30:52,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.log.dir so I do NOT create it in target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6 2024-11-12T18:30:52,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d2b41eaf-d5f5-33fc-974a-418d166d99cd/hadoop.tmp.dir so I do NOT create it in target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6 2024-11-12T18:30:52,721 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060, deleteOnExit=true 2024-11-12T18:30:52,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-12T18:30:52,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/test.cache.data in system properties and HBase conf 2024-11-12T18:30:52,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.tmp.dir in system properties and HBase conf 2024-11-12T18:30:52,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir in system properties and HBase conf 2024-11-12T18:30:52,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-12T18:30:52,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-12T18:30:52,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-12T18:30:52,723 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-12T18:30:52,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:30:52,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:30:52,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-12T18:30:52,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:30:52,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-12T18:30:52,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-12T18:30:52,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:30:52,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:30:52,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-12T18:30:52,724 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/nfs.dump.dir in system properties and HBase conf 2024-11-12T18:30:52,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/java.io.tmpdir in system properties and HBase conf 2024-11-12T18:30:52,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:30:52,725 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-12T18:30:52,725 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-12T18:30:52,727 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-12T18:30:52,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,744 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-12T18:30:52,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:52,839 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:52,845 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:30:52,850 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:30:52,850 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:30:52,850 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T18:30:52,853 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:52,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22146658{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:30:52,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5864970e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:30:52,983 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@73a5fbf0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/java.io.tmpdir/jetty-localhost-44265-hadoop-hdfs-3_4_1-tests_jar-_-any-14015514092689198812/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:30:52,983 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@27c14f1b{HTTP/1.1, (http/1.1)}{localhost:44265} 2024-11-12T18:30:52,983 INFO [Time-limited test {}] server.Server(415): Started @148745ms 2024-11-12T18:30:53,005 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-12T18:30:53,135 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:53,139 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:30:53,141 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:30:53,141 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:30:53,141 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:30:53,141 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39bfbfb2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:30:53,142 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78511eca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:30:53,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:30:53,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:30:53,291 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c5451bf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/java.io.tmpdir/jetty-localhost-35617-hadoop-hdfs-3_4_1-tests_jar-_-any-9071898692183469552/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:53,292 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@14df3a8{HTTP/1.1, (http/1.1)}{localhost:35617} 2024-11-12T18:30:53,293 INFO [Time-limited test {}] server.Server(415): Started @149055ms 2024-11-12T18:30:53,295 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:30:53,344 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:30:53,348 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:30:53,350 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:30:53,350 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:30:53,350 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T18:30:53,351 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@645076e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:30:53,351 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c156613{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:30:53,399 WARN [Thread-1182 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data1/current/BP-1633596070-172.17.0.3-1731436252766/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:53,399 WARN [Thread-1183 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data2/current/BP-1633596070-172.17.0.3-1731436252766/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:53,417 WARN [Thread-1161 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T18:30:53,420 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc3d03a22c153f2 with lease ID 0x60d6d414249e67c: Processing first storage report for DS-849e365b-d65f-4a32-9702-e9a8839d490e from datanode DatanodeRegistration(127.0.0.1:39159, datanodeUuid=15be3847-062d-4564-9b3f-a38032210215, infoPort=37703, infoSecurePort=0, ipcPort=39245, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766) 2024-11-12T18:30:53,420 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc3d03a22c153f2 with lease ID 0x60d6d414249e67c: from storage DS-849e365b-d65f-4a32-9702-e9a8839d490e node DatanodeRegistration(127.0.0.1:39159, datanodeUuid=15be3847-062d-4564-9b3f-a38032210215, infoPort=37703, infoSecurePort=0, ipcPort=39245, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:53,420 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc3d03a22c153f2 with lease ID 0x60d6d414249e67c: Processing first storage report for DS-77c47bb0-3a31-433d-83fa-6c92210b73fb from datanode DatanodeRegistration(127.0.0.1:39159, datanodeUuid=15be3847-062d-4564-9b3f-a38032210215, infoPort=37703, infoSecurePort=0, ipcPort=39245, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766) 2024-11-12T18:30:53,420 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc3d03a22c153f2 with lease ID 0x60d6d414249e67c: from storage DS-77c47bb0-3a31-433d-83fa-6c92210b73fb node DatanodeRegistration(127.0.0.1:39159, datanodeUuid=15be3847-062d-4564-9b3f-a38032210215, infoPort=37703, infoSecurePort=0, ipcPort=39245, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:53,468 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3858a577{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/java.io.tmpdir/jetty-localhost-41705-hadoop-hdfs-3_4_1-tests_jar-_-any-4908560954509838120/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:30:53,469 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@32070d19{HTTP/1.1, (http/1.1)}{localhost:41705} 2024-11-12T18:30:53,469 INFO [Time-limited test {}] server.Server(415): Started @149230ms 2024-11-12T18:30:53,470 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-12T18:30:53,563 WARN [Thread-1208 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data3/current/BP-1633596070-172.17.0.3-1731436252766/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:53,563 WARN [Thread-1209 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data4/current/BP-1633596070-172.17.0.3-1731436252766/current, will proceed with Du for space computation calculation, 2024-11-12T18:30:53,588 WARN [Thread-1197 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T18:30:53,590 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6ddf189471e40336 with lease ID 0x60d6d414249e67d: Processing first storage report for DS-034a2b91-6a9f-494c-8b05-711f90ff0a49 from datanode DatanodeRegistration(127.0.0.1:39961, datanodeUuid=1cfcea85-3d35-4012-b30c-0dc74e9b5ecb, infoPort=45223, infoSecurePort=0, ipcPort=37341, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766) 2024-11-12T18:30:53,590 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6ddf189471e40336 with lease ID 0x60d6d414249e67d: from storage DS-034a2b91-6a9f-494c-8b05-711f90ff0a49 node DatanodeRegistration(127.0.0.1:39961, datanodeUuid=1cfcea85-3d35-4012-b30c-0dc74e9b5ecb, infoPort=45223, infoSecurePort=0, ipcPort=37341, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:53,590 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6ddf189471e40336 with lease ID 0x60d6d414249e67d: Processing first storage report for DS-10baee32-e635-436b-940d-dbd6219c1dea from datanode DatanodeRegistration(127.0.0.1:39961, datanodeUuid=1cfcea85-3d35-4012-b30c-0dc74e9b5ecb, infoPort=45223, infoSecurePort=0, ipcPort=37341, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766) 2024-11-12T18:30:53,590 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6ddf189471e40336 with lease ID 0x60d6d414249e67d: from storage DS-10baee32-e635-436b-940d-dbd6219c1dea node DatanodeRegistration(127.0.0.1:39961, datanodeUuid=1cfcea85-3d35-4012-b30c-0dc74e9b5ecb, infoPort=45223, infoSecurePort=0, ipcPort=37341, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:30:53,602 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6 2024-11-12T18:30:53,605 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/zookeeper_0, clientPort=52165, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-12T18:30:53,606 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52165 2024-11-12T18:30:53,606 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:53,608 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:53,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39159 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:30:53,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:30:53,619 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c with version=8 2024-11-12T18:30:53,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/hbase-staging 2024-11-12T18:30:53,621 INFO [Time-limited test {}] client.ConnectionUtils(128): master/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:30:53,621 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:53,621 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:53,621 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:30:53,621 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:53,621 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:30:53,621 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-12T18:30:53,621 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:30:53,622 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45187 2024-11-12T18:30:53,623 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45187 connecting to ZooKeeper ensemble=127.0.0.1:52165 2024-11-12T18:30:53,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:451870x0, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:30:53,630 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45187-0x10035429c430000 connected 2024-11-12T18:30:53,644 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:53,645 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:53,647 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:30:53,648 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c, hbase.cluster.distributed=false 2024-11-12T18:30:53,649 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:30:53,650 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45187 2024-11-12T18:30:53,650 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45187 2024-11-12T18:30:53,650 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45187 2024-11-12T18:30:53,650 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45187 2024-11-12T18:30:53,651 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45187 2024-11-12T18:30:53,667 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:30:53,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:53,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:53,667 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:30:53,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:30:53,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:30:53,667 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T18:30:53,667 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:30:53,668 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42983 2024-11-12T18:30:53,669 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42983 connecting to ZooKeeper ensemble=127.0.0.1:52165 2024-11-12T18:30:53,670 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:53,672 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:53,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:429830x0, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:30:53,678 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:429830x0, quorum=127.0.0.1:52165, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:30:53,678 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42983-0x10035429c430001 connected 2024-11-12T18:30:53,678 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T18:30:53,679 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T18:30:53,680 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T18:30:53,681 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:30:53,681 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42983 2024-11-12T18:30:53,681 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42983 2024-11-12T18:30:53,682 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42983 2024-11-12T18:30:53,682 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42983 2024-11-12T18:30:53,682 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42983 2024-11-12T18:30:53,695 
DEBUG [M:0;9911683f163c:45187 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9911683f163c:45187 2024-11-12T18:30:53,695 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/9911683f163c,45187,1731436253621 2024-11-12T18:30:53,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:30:53,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:30:53,698 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9911683f163c,45187,1731436253621 2024-11-12T18:30:53,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T18:30:53,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:53,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:53,700 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T18:30:53,700 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9911683f163c,45187,1731436253621 from backup master directory 2024-11-12T18:30:53,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9911683f163c,45187,1731436253621 2024-11-12T18:30:53,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:30:53,701 WARN [master/9911683f163c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-12T18:30:53,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:30:53,701 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9911683f163c,45187,1731436253621 2024-11-12T18:30:53,706 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/hbase.id] with ID: 3ee93003-5b5a-4815-bae0-b7e2805c2436 2024-11-12T18:30:53,706 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/.tmp/hbase.id 2024-11-12T18:30:53,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39159 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:30:53,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:30:53,714 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/.tmp/hbase.id]:[hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/hbase.id] 2024-11-12T18:30:53,727 INFO [master/9911683f163c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:53,727 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-12T18:30:53,728 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-12T18:30:53,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:53,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:53,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:30:53,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39159 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:30:53,738 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:30:53,739 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T18:30:53,740 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:30:53,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39159 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:30:53,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:30:53,748 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store 2024-11-12T18:30:53,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:30:53,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39159 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:30:53,755 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:53,755 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:30:53,755 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:53,756 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:53,756 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:30:53,756 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:30:53,756 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-12T18:30:53,756 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436253755Disabling compacts and flushes for region at 1731436253755Disabling writes for close at 1731436253756 (+1 ms)Writing region close event to WAL at 1731436253756Closed at 1731436253756 2024-11-12T18:30:53,756 WARN [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/.initializing 2024-11-12T18:30:53,757 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/WALs/9911683f163c,45187,1731436253621 2024-11-12T18:30:53,759 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C45187%2C1731436253621, suffix=, logDir=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/WALs/9911683f163c,45187,1731436253621, archiveDir=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/oldWALs, maxLogs=10 2024-11-12T18:30:53,759 INFO [master/9911683f163c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C45187%2C1731436253621.1731436253759 2024-11-12T18:30:53,765 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/WALs/9911683f163c,45187,1731436253621/9911683f163c%2C45187%2C1731436253621.1731436253759 2024-11-12T18:30:53,766 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37703:37703),(127.0.0.1/127.0.0.1:45223:45223)] 2024-11-12T18:30:53,767 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:30:53,767 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:53,767 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:53,767 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:53,769 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:53,770 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T18:30:53,770 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:53,771 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:53,771 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:53,772 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T18:30:53,772 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:53,772 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:30:53,773 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:53,774 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T18:30:53,774 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:53,774 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:30:53,774 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:53,775 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T18:30:53,775 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:53,776 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:30:53,776 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:53,777 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:53,777 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:53,778 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:53,778 DEBUG [master/9911683f163c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:53,779 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T18:30:53,780 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:30:53,782 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:30:53,783 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=731962, jitterRate=-0.06926316022872925}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T18:30:53,784 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731436253767Initializing all the Stores at 1731436253768 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436253768Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436253768Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436253768Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436253768Cleaning up temporary data from old regions at 1731436253778 (+10 ms)Region opened successfully at 1731436253784 (+6 ms) 2024-11-12T18:30:53,784 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T18:30:53,788 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@559c460c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:30:53,789 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-12T18:30:53,790 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T18:30:53,790 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T18:30:53,790 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T18:30:53,790 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-12T18:30:53,791 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-12T18:30:53,791 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T18:30:53,793 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-12T18:30:53,794 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T18:30:53,795 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-12T18:30:53,795 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T18:30:53,796 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T18:30:53,797 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-12T18:30:53,798 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T18:30:53,798 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T18:30:53,800 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-12T18:30:53,801 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T18:30:53,802 DEBUG 
[master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T18:30:53,804 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T18:30:53,805 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T18:30:53,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:30:53,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:30:53,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:53,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:53,808 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=9911683f163c,45187,1731436253621, sessionid=0x10035429c430000, setting cluster-up flag (Was=false) 2024-11-12T18:30:53,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:53,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:53,815 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T18:30:53,816 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,45187,1731436253621 2024-11-12T18:30:53,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:53,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:53,824 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T18:30:53,825 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,45187,1731436253621 2024-11-12T18:30:53,827 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-12T18:30:53,829 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-12T18:30:53,829 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-12T18:30:53,829 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-12T18:30:53,829 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9911683f163c,45187,1731436253621 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T18:30:53,831 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:30:53,831 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:30:53,831 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:30:53,831 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:30:53,831 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9911683f163c:0, corePoolSize=10, maxPoolSize=10 2024-11-12T18:30:53,831 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:53,831 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:30:53,831 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9911683f163c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-12T18:30:53,832 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731436283832 2024-11-12T18:30:53,833 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T18:30:53,833 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T18:30:53,833 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T18:30:53,833 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T18:30:53,833 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T18:30:53,833 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:30:53,833 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T18:30:53,833 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-12T18:30:53,833 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-12T18:30:53,834 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T18:30:53,834 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T18:30:53,834 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T18:30:53,835 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:53,835 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T18:30:53,835 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T18:30:53,835 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T18:30:53,835 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436253835,5,FailOnTimeoutGroup] 2024-11-12T18:30:53,835 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436253835,5,FailOnTimeoutGroup] 2024-11-12T18:30:53,835 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:53,836 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-12T18:30:53,836 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:53,836 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:53,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39159 is added to blk_1073741831_1007 (size=1321) 2024-11-12T18:30:53,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741831_1007 (size=1321) 2024-11-12T18:30:53,846 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-12T18:30:53,846 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c 2024-11-12T18:30:53,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:30:53,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39159 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:30:53,855 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:53,857 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:30:53,858 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:30:53,858 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:53,859 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:53,859 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:30:53,860 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:30:53,860 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:53,861 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:53,861 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:30:53,862 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:30:53,862 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:53,862 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:53,863 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:30:53,864 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:30:53,864 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:53,864 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:53,864 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:30:53,865 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740 2024-11-12T18:30:53,865 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740 2024-11-12T18:30:53,867 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:30:53,867 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:30:53,867 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T18:30:53,868 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:30:53,870 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:30:53,871 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=881085, jitterRate=0.1203579306602478}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T18:30:53,871 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731436253855Initializing all the Stores at 1731436253856 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436253856Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436253856Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436253857 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436253857Cleaning up temporary data from old regions at 1731436253867 (+10 ms)Region opened successfully at 1731436253871 (+4 ms) 2024-11-12T18:30:53,871 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:30:53,871 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:30:53,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:30:53,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:30:53,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:30:53,872 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:30:53,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436253871Disabling compacts and flushes for region at 
1731436253871Disabling writes for close at 1731436253872 (+1 ms)Writing region close event to WAL at 1731436253872Closed at 1731436253872 2024-11-12T18:30:53,873 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:30:53,873 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-12T18:30:53,874 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-12T18:30:53,875 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:30:53,876 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-12T18:30:53,884 INFO [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(746): ClusterId : 3ee93003-5b5a-4815-bae0-b7e2805c2436 2024-11-12T18:30:53,884 DEBUG [RS:0;9911683f163c:42983 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T18:30:53,887 DEBUG [RS:0;9911683f163c:42983 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T18:30:53,887 DEBUG [RS:0;9911683f163c:42983 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T18:30:53,889 DEBUG [RS:0;9911683f163c:42983 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T18:30:53,889 DEBUG [RS:0;9911683f163c:42983 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29e59a49, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:30:53,901 DEBUG [RS:0;9911683f163c:42983 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9911683f163c:42983 2024-11-12T18:30:53,901 INFO [RS:0;9911683f163c:42983 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T18:30:53,902 INFO [RS:0;9911683f163c:42983 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T18:30:53,902 DEBUG [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-12T18:30:53,903 INFO [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,45187,1731436253621 with port=42983, startcode=1731436253666 2024-11-12T18:30:53,903 DEBUG [RS:0;9911683f163c:42983 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T18:30:53,905 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51595, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T18:30:53,906 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45187 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9911683f163c,42983,1731436253666 2024-11-12T18:30:53,906 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45187 {}] master.ServerManager(517): Registering regionserver=9911683f163c,42983,1731436253666 2024-11-12T18:30:53,907 DEBUG [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c 2024-11-12T18:30:53,907 DEBUG [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40969 2024-11-12T18:30:53,907 DEBUG [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T18:30:53,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:30:53,910 DEBUG [RS:0;9911683f163c:42983 {}] zookeeper.ZKUtil(111): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9911683f163c,42983,1731436253666 2024-11-12T18:30:53,910 WARN [RS:0;9911683f163c:42983 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T18:30:53,910 INFO [RS:0;9911683f163c:42983 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:30:53,910 DEBUG [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666 2024-11-12T18:30:53,910 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9911683f163c,42983,1731436253666] 2024-11-12T18:30:53,913 INFO [RS:0;9911683f163c:42983 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T18:30:53,915 INFO [RS:0;9911683f163c:42983 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T18:30:53,916 INFO [RS:0;9911683f163c:42983 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T18:30:53,916 INFO [RS:0;9911683f163c:42983 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-12T18:30:53,916 INFO [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T18:30:53,917 INFO [RS:0;9911683f163c:42983 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T18:30:53,917 INFO [RS:0;9911683f163c:42983 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:53,917 DEBUG [RS:0;9911683f163c:42983 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:53,917 DEBUG [RS:0;9911683f163c:42983 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:53,917 DEBUG [RS:0;9911683f163c:42983 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:53,917 DEBUG [RS:0;9911683f163c:42983 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:53,917 DEBUG [RS:0;9911683f163c:42983 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:53,917 DEBUG [RS:0;9911683f163c:42983 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:30:53,917 DEBUG [RS:0;9911683f163c:42983 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:53,917 DEBUG [RS:0;9911683f163c:42983 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:53,917 DEBUG [RS:0;9911683f163c:42983 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:53,917 DEBUG [RS:0;9911683f163c:42983 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:53,917 DEBUG [RS:0;9911683f163c:42983 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:53,917 DEBUG [RS:0;9911683f163c:42983 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:30:53,917 DEBUG [RS:0;9911683f163c:42983 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:30:53,917 DEBUG [RS:0;9911683f163c:42983 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:30:53,918 INFO [RS:0;9911683f163c:42983 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-12T18:30:53,918 INFO [RS:0;9911683f163c:42983 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:53,918 INFO [RS:0;9911683f163c:42983 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:53,918 INFO [RS:0;9911683f163c:42983 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:53,918 INFO [RS:0;9911683f163c:42983 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:53,918 INFO [RS:0;9911683f163c:42983 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,42983,1731436253666-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:30:53,933 INFO [RS:0;9911683f163c:42983 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T18:30:53,933 INFO [RS:0;9911683f163c:42983 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,42983,1731436253666-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:53,934 INFO [RS:0;9911683f163c:42983 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:53,934 INFO [RS:0;9911683f163c:42983 {}] regionserver.Replication(171): 9911683f163c,42983,1731436253666 started 2024-11-12T18:30:53,949 INFO [RS:0;9911683f163c:42983 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:53,949 INFO [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(1482): Serving as 9911683f163c,42983,1731436253666, RpcServer on 9911683f163c/172.17.0.3:42983, sessionid=0x10035429c430001 2024-11-12T18:30:53,949 DEBUG [RS:0;9911683f163c:42983 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T18:30:53,949 DEBUG [RS:0;9911683f163c:42983 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9911683f163c,42983,1731436253666 2024-11-12T18:30:53,949 DEBUG [RS:0;9911683f163c:42983 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,42983,1731436253666' 2024-11-12T18:30:53,949 DEBUG [RS:0;9911683f163c:42983 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T18:30:53,950 DEBUG [RS:0;9911683f163c:42983 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T18:30:53,950 DEBUG [RS:0;9911683f163c:42983 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T18:30:53,950 DEBUG [RS:0;9911683f163c:42983 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T18:30:53,950 DEBUG [RS:0;9911683f163c:42983 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9911683f163c,42983,1731436253666 2024-11-12T18:30:53,950 DEBUG [RS:0;9911683f163c:42983 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,42983,1731436253666' 2024-11-12T18:30:53,950 DEBUG [RS:0;9911683f163c:42983 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T18:30:53,950 DEBUG 
[RS:0;9911683f163c:42983 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T18:30:53,951 DEBUG [RS:0;9911683f163c:42983 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T18:30:53,951 INFO [RS:0;9911683f163c:42983 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T18:30:53,951 INFO [RS:0;9911683f163c:42983 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T18:30:54,027 WARN [9911683f163c:45187 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-12T18:30:54,053 INFO [RS:0;9911683f163c:42983 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C42983%2C1731436253666, suffix=, logDir=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666, archiveDir=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/oldWALs, maxLogs=32 2024-11-12T18:30:54,054 INFO [RS:0;9911683f163c:42983 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C42983%2C1731436253666.1731436254054 2024-11-12T18:30:54,061 INFO [RS:0;9911683f163c:42983 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436254054 2024-11-12T18:30:54,062 DEBUG [RS:0;9911683f163c:42983 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37703:37703),(127.0.0.1/127.0.0.1:45223:45223)] 2024-11-12T18:30:54,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:30:54,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:30:54,277 DEBUG [9911683f163c:45187 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-12T18:30:54,278 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9911683f163c,42983,1731436253666 2024-11-12T18:30:54,279 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,42983,1731436253666, state=OPENING 2024-11-12T18:30:54,281 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-12T18:30:54,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:54,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:30:54,282 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:30:54,282 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:30:54,283 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,42983,1731436253666}] 2024-11-12T18:30:54,283 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:30:54,436 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T18:30:54,438 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35123, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T18:30:54,442 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-12T18:30:54,443 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:30:54,444 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C42983%2C1731436253666.meta, suffix=.meta, logDir=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666, archiveDir=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/oldWALs, maxLogs=32 2024-11-12T18:30:54,445 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C42983%2C1731436253666.meta.1731436254445.meta 2024-11-12T18:30:54,451 INFO 
[RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.meta.1731436254445.meta 2024-11-12T18:30:54,452 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45223:45223),(127.0.0.1/127.0.0.1:37703:37703)] 2024-11-12T18:30:54,453 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:30:54,453 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-12T18:30:54,453 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-12T18:30:54,453 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-12T18:30:54,453 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-12T18:30:54,453 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:54,453 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-12T18:30:54,453 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-12T18:30:54,455 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:30:54,456 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:30:54,456 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:54,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:54,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:30:54,457 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:30:54,457 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:54,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:54,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:30:54,458 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:30:54,459 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:54,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:54,459 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:30:54,460 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:30:54,460 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:54,460 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:30:54,460 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:30:54,461 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740 2024-11-12T18:30:54,462 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740 2024-11-12T18:30:54,463 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:30:54,463 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:30:54,464 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-12T18:30:54,465 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:30:54,466 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=790476, jitterRate=0.005143448710441589}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T18:30:54,466 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-12T18:30:54,466 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731436254454Writing region info on filesystem at 1731436254454Initializing all the Stores at 1731436254454Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436254454Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436254455 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436254455Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436254455Cleaning up temporary data from old regions at 1731436254463 (+8 ms)Running coprocessor post-open hooks at 1731436254466 (+3 ms)Region opened successfully at 1731436254466 2024-11-12T18:30:54,468 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731436254436 2024-11-12T18:30:54,470 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T18:30:54,470 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-12T18:30:54,471 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=9911683f163c,42983,1731436253666 2024-11-12T18:30:54,472 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,42983,1731436253666, state=OPEN 2024-11-12T18:30:54,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:30:54,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:30:54,477 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=9911683f163c,42983,1731436253666 2024-11-12T18:30:54,477 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:30:54,477 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:30:54,480 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T18:30:54,480 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,42983,1731436253666 in 195 msec 2024-11-12T18:30:54,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T18:30:54,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 606 msec 2024-11-12T18:30:54,483 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:30:54,484 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-12T18:30:54,485 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:30:54,485 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,42983,1731436253666, seqNum=-1] 2024-11-12T18:30:54,485 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:30:54,487 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40179, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:30:54,493 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 664 msec 2024-11-12T18:30:54,493 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731436254493, completionTime=-1 2024-11-12T18:30:54,493 INFO 
[master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-12T18:30:54,493 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-12T18:30:54,495 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-12T18:30:54,495 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731436314495 2024-11-12T18:30:54,495 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731436374495 2024-11-12T18:30:54,495 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-12T18:30:54,495 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,45187,1731436253621-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:54,495 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,45187,1731436253621-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:54,495 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,45187,1731436253621-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:54,495 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9911683f163c:45187, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:54,496 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:54,496 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:54,498 DEBUG [master/9911683f163c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-12T18:30:54,499 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.798sec 2024-11-12T18:30:54,499 INFO [master/9911683f163c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T18:30:54,499 INFO [master/9911683f163c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T18:30:54,499 INFO [master/9911683f163c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T18:30:54,499 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-12T18:30:54,499 INFO [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T18:30:54,499 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,45187,1731436253621-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:30:54,500 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,45187,1731436253621-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T18:30:54,502 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-12T18:30:54,502 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T18:30:54,502 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,45187,1731436253621-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:30:54,585 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@411711c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:30:54,585 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 9911683f163c,45187,-1 for getting cluster id 2024-11-12T18:30:54,585 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-12T18:30:54,587 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3ee93003-5b5a-4815-bae0-b7e2805c2436' 2024-11-12T18:30:54,587 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-12T18:30:54,587 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3ee93003-5b5a-4815-bae0-b7e2805c2436" 2024-11-12T18:30:54,588 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b998570, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:30:54,588 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9911683f163c,45187,-1] 2024-11-12T18:30:54,588 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-12T18:30:54,588 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:30:54,590 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59360, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-12T18:30:54,591 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@568ab5a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:30:54,591 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:30:54,592 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,42983,1731436253666, seqNum=-1] 2024-11-12T18:30:54,592 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:30:54,594 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50150, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:30:54,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=9911683f163c,45187,1731436253621 2024-11-12T18:30:54,596 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:30:54,598 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-12T18:30:54,598 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-12T18:30:54,598 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-12T18:30:54,599 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-12T18:30:54,599 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 9911683f163c,45187,1731436253621 2024-11-12T18:30:54,599 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3f997047 2024-11-12T18:30:54,600 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T18:30:54,601 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59372, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T18:30:54,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45187 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-12T18:30:54,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45187 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-12T18:30:54,602 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45187 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:30:54,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45187 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-12T18:30:54,605 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T18:30:54,605 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:54,605 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45187 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-12T18:30:54,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45187 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:30:54,606 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T18:30:54,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39159 is added to blk_1073741835_1011 (size=395) 2024-11-12T18:30:54,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741835_1011 (size=395) 2024-11-12T18:30:54,615 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d37ab61228f529a0c0c1733f5f2ded2c, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c 2024-11-12T18:30:54,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741836_1012 (size=78) 2024-11-12T18:30:54,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39159 is added to blk_1073741836_1012 (size=78) 2024-11-12T18:30:54,623 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:54,623 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing d37ab61228f529a0c0c1733f5f2ded2c, disabling compactions & flushes 2024-11-12T18:30:54,623 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c. 2024-11-12T18:30:54,623 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c. 2024-11-12T18:30:54,623 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c. after waiting 0 ms 2024-11-12T18:30:54,623 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c. 2024-11-12T18:30:54,624 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c. 2024-11-12T18:30:54,624 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for d37ab61228f529a0c0c1733f5f2ded2c: Waiting for close lock at 1731436254623Disabling compacts and flushes for region at 1731436254623Disabling writes for close at 1731436254623Writing region close event to WAL at 1731436254624 (+1 ms)Closed at 1731436254624 2024-11-12T18:30:54,625 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T18:30:54,626 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731436254625"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731436254625"}]},"ts":"1731436254625"} 2024-11-12T18:30:54,628 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-12T18:30:54,630 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T18:30:54,630 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731436254630"}]},"ts":"1731436254630"} 2024-11-12T18:30:54,632 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-12T18:30:54,632 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d37ab61228f529a0c0c1733f5f2ded2c, ASSIGN}] 2024-11-12T18:30:54,634 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d37ab61228f529a0c0c1733f5f2ded2c, ASSIGN 2024-11-12T18:30:54,635 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d37ab61228f529a0c0c1733f5f2ded2c, ASSIGN; state=OFFLINE, location=9911683f163c,42983,1731436253666; forceNewPlan=false, retain=false 2024-11-12T18:30:54,785 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d37ab61228f529a0c0c1733f5f2ded2c, regionState=OPENING, regionLocation=9911683f163c,42983,1731436253666 2024-11-12T18:30:54,788 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d37ab61228f529a0c0c1733f5f2ded2c, ASSIGN because future has completed 2024-11-12T18:30:54,789 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d37ab61228f529a0c0c1733f5f2ded2c, server=9911683f163c,42983,1731436253666}] 2024-11-12T18:30:54,947 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c. 
2024-11-12T18:30:54,947 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d37ab61228f529a0c0c1733f5f2ded2c, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:30:54,947 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart d37ab61228f529a0c0c1733f5f2ded2c 2024-11-12T18:30:54,947 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:30:54,947 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d37ab61228f529a0c0c1733f5f2ded2c 2024-11-12T18:30:54,948 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d37ab61228f529a0c0c1733f5f2ded2c 2024-11-12T18:30:54,949 INFO [StoreOpener-d37ab61228f529a0c0c1733f5f2ded2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d37ab61228f529a0c0c1733f5f2ded2c 2024-11-12T18:30:54,951 INFO [StoreOpener-d37ab61228f529a0c0c1733f5f2ded2c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d37ab61228f529a0c0c1733f5f2ded2c columnFamilyName info 2024-11-12T18:30:54,951 DEBUG [StoreOpener-d37ab61228f529a0c0c1733f5f2ded2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:30:54,951 INFO [StoreOpener-d37ab61228f529a0c0c1733f5f2ded2c-1 {}] regionserver.HStore(327): Store=d37ab61228f529a0c0c1733f5f2ded2c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:30:54,952 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d37ab61228f529a0c0c1733f5f2ded2c 2024-11-12T18:30:54,952 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d37ab61228f529a0c0c1733f5f2ded2c 2024-11-12T18:30:54,953 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d37ab61228f529a0c0c1733f5f2ded2c 2024-11-12T18:30:54,953 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d37ab61228f529a0c0c1733f5f2ded2c 2024-11-12T18:30:54,953 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d37ab61228f529a0c0c1733f5f2ded2c 2024-11-12T18:30:54,955 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d37ab61228f529a0c0c1733f5f2ded2c 2024-11-12T18:30:54,958 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d37ab61228f529a0c0c1733f5f2ded2c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:30:54,959 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d37ab61228f529a0c0c1733f5f2ded2c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787649, jitterRate=0.0015477240085601807}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-12T18:30:54,959 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d37ab61228f529a0c0c1733f5f2ded2c 2024-11-12T18:30:54,960 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d37ab61228f529a0c0c1733f5f2ded2c: Running coprocessor pre-open hook at 1731436254948Writing region info on filesystem at 1731436254948Initializing all the Stores at 1731436254949 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436254949Cleaning up temporary data from old regions at 1731436254953 (+4 ms)Running coprocessor post-open hooks at 1731436254959 (+6 ms)Region opened successfully at 1731436254960 (+1 ms) 2024-11-12T18:30:54,962 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c., pid=6, masterSystemTime=1731436254942 2024-11-12T18:30:54,964 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c. 2024-11-12T18:30:54,964 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c. 2024-11-12T18:30:54,965 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d37ab61228f529a0c0c1733f5f2ded2c, regionState=OPEN, openSeqNum=2, regionLocation=9911683f163c,42983,1731436253666 2024-11-12T18:30:54,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d37ab61228f529a0c0c1733f5f2ded2c, server=9911683f163c,42983,1731436253666 because future has completed 2024-11-12T18:30:54,972 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-12T18:30:54,973 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d37ab61228f529a0c0c1733f5f2ded2c, server=9911683f163c,42983,1731436253666 in 181 msec 2024-11-12T18:30:54,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-12T18:30:54,976 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d37ab61228f529a0c0c1733f5f2ded2c, ASSIGN in 341 msec 2024-11-12T18:30:54,977 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T18:30:54,977 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731436254977"}]},"ts":"1731436254977"} 2024-11-12T18:30:54,979 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-12T18:30:54,981 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T18:30:54,983 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 379 msec 2024-11-12T18:30:55,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:30:55,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:30:56,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:30:56,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:30:57,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:30:57,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:30:58,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:30:58,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:30:59,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:30:59,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:30:59,956 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-12T18:30:59,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:59,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:59,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:59,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:59,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:59,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:59,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:59,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:59,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:59,982 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:30:59,988 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-12T18:30:59,988 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-12T18:30:59,989 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-12T18:30:59,989 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-12T18:30:59,990 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T18:30:59,990 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-12T18:30:59,990 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-12T18:30:59,991 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-12T18:31:00,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:00,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:01,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:01,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:02,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:02,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:03,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:03,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:04,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:04,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:04,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45187 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:31:04,617 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-12T18:31:04,617 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-12T18:31:04,620 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-12T18:31:04,620 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c. 2024-11-12T18:31:04,624 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c., hostname=9911683f163c,42983,1731436253666, seqNum=2] 2024-11-12T18:31:05,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:05,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:06,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:06,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:06,627 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436254054 2024-11-12T18:31:06,628 WARN [ResponseProcessor for block BP-1633596070-172.17.0.3-1731436252766:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1633596070-172.17.0.3-1731436252766:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:06,628 WARN [ResponseProcessor for block BP-1633596070-172.17.0.3-1731436252766:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1633596070-172.17.0.3-1731436252766:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1633596070-172.17.0.3-1731436252766:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:39961,DS-034a2b91-6a9f-494c-8b05-711f90ff0a49,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:31:06,628 WARN [ResponseProcessor for block BP-1633596070-172.17.0.3-1731436252766:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1633596070-172.17.0.3-1731436252766:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1633596070-172.17.0.3-1731436252766:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:39961,DS-034a2b91-6a9f-494c-8b05-711f90ff0a49,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:06,629 WARN [DataStreamer for file /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.meta.1731436254445.meta block BP-1633596070-172.17.0.3-1731436252766:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1633596070-172.17.0.3-1731436252766:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39961,DS-034a2b91-6a9f-494c-8b05-711f90ff0a49,DISK], DatanodeInfoWithStorage[127.0.0.1:39159,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39961,DS-034a2b91-6a9f-494c-8b05-711f90ff0a49,DISK]) is bad. 2024-11-12T18:31:06,629 WARN [PacketResponder: BP-1633596070-172.17.0.3-1731436252766:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39961] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:06,629 WARN [PacketResponder: BP-1633596070-172.17.0.3-1731436252766:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39961] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] 
at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:06,629 WARN [DataStreamer for file /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/WALs/9911683f163c,45187,1731436253621/9911683f163c%2C45187%2C1731436253621.1731436253759 block BP-1633596070-172.17.0.3-1731436252766:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1633596070-172.17.0.3-1731436252766:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39159,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK], DatanodeInfoWithStorage[127.0.0.1:39961,DS-034a2b91-6a9f-494c-8b05-711f90ff0a49,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39961,DS-034a2b91-6a9f-494c-8b05-711f90ff0a49,DISK]) is bad. 2024-11-12T18:31:06,630 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_412045516_22 at /127.0.0.1:39884 [Receiving block BP-1633596070-172.17.0.3-1731436252766:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39159:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39884 dst: /127.0.0.1:39159 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:06,630 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-786663096_22 at /127.0.0.1:39900 [Receiving block BP-1633596070-172.17.0.3-1731436252766:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39159:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39900 dst: /127.0.0.1:39159 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:06,630 WARN [DataStreamer for file /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436254054 block BP-1633596070-172.17.0.3-1731436252766:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1633596070-172.17.0.3-1731436252766:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39159,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK], DatanodeInfoWithStorage[127.0.0.1:39961,DS-034a2b91-6a9f-494c-8b05-711f90ff0a49,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39961,DS-034a2b91-6a9f-494c-8b05-711f90ff0a49,DISK]) is bad. 2024-11-12T18:31:06,630 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-786663096_22 at /127.0.0.1:37762 [Receiving block BP-1633596070-172.17.0.3-1731436252766:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39961:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37762 dst: /127.0.0.1:39961 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:06,630 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-786663096_22 at /127.0.0.1:37756 [Receiving block BP-1633596070-172.17.0.3-1731436252766:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39961:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37756 dst: /127.0.0.1:39961 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:06,630 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-786663096_22 at /127.0.0.1:39924 [Receiving block BP-1633596070-172.17.0.3-1731436252766:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39159:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39924 dst: /127.0.0.1:39159 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:06,630 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_412045516_22 at /127.0.0.1:37732 [Receiving block BP-1633596070-172.17.0.3-1731436252766:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39961:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37732 dst: /127.0.0.1:39961 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:06,632 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3858a577{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:06,632 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@32070d19{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:31:06,632 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:31:06,633 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c156613{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:31:06,633 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@645076e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir/,STOPPED} 2024-11-12T18:31:06,635 WARN [BP-1633596070-172.17.0.3-1731436252766 heartbeating to localhost/127.0.0.1:40969 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:31:06,635 WARN [BP-1633596070-172.17.0.3-1731436252766 heartbeating to localhost/127.0.0.1:40969 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1633596070-172.17.0.3-1731436252766 (Datanode Uuid 1cfcea85-3d35-4012-b30c-0dc74e9b5ecb) service to localhost/127.0.0.1:40969 2024-11-12T18:31:06,636 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data4/current/BP-1633596070-172.17.0.3-1731436252766 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:06,636 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data3/current/BP-1633596070-172.17.0.3-1731436252766 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:06,636 WARN 
[Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:31:06,638 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-12T18:31:06,638 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:31:06,651 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:06,654 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:31:06,656 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:31:06,656 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:31:06,656 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T18:31:06,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47fb2b3a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:31:06,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@232b7e3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:31:06,772 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@298442da{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/java.io.tmpdir/jetty-localhost-38377-hadoop-hdfs-3_4_1-tests_jar-_-any-17541366733010156506/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:06,773 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6898429b{HTTP/1.1, (http/1.1)}{localhost:38377} 2024-11-12T18:31:06,773 INFO [Time-limited test {}] server.Server(415): Started @162535ms 2024-11-12T18:31:06,774 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:31:06,793 WARN [ResponseProcessor for block BP-1633596070-172.17.0.3-1731436252766:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1633596070-172.17.0.3-1731436252766:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:31:06,793 WARN [ResponseProcessor for block BP-1633596070-172.17.0.3-1731436252766:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1633596070-172.17.0.3-1731436252766:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:06,793 WARN [ResponseProcessor for block BP-1633596070-172.17.0.3-1731436252766:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1633596070-172.17.0.3-1731436252766:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:06,793 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-786663096_22 at /127.0.0.1:60060 [Receiving block BP-1633596070-172.17.0.3-1731436252766:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39159:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60060 dst: /127.0.0.1:39159 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:06,793 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_412045516_22 at /127.0.0.1:60074 [Receiving block BP-1633596070-172.17.0.3-1731436252766:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39159:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60074 dst: /127.0.0.1:39159 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:06,793 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-786663096_22 at /127.0.0.1:60086 [Receiving block BP-1633596070-172.17.0.3-1731436252766:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39159:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60086 dst: /127.0.0.1:39159 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T18:31:06,799 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c5451bf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:06,800 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@14df3a8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:31:06,800 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:31:06,800 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78511eca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:31:06,800 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39bfbfb2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir/,STOPPED} 2024-11-12T18:31:06,801 WARN [BP-1633596070-172.17.0.3-1731436252766 heartbeating to localhost/127.0.0.1:40969 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:31:06,801 WARN [BP-1633596070-172.17.0.3-1731436252766 heartbeating to localhost/127.0.0.1:40969 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1633596070-172.17.0.3-1731436252766 (Datanode Uuid 15be3847-062d-4564-9b3f-a38032210215) service to localhost/127.0.0.1:40969 2024-11-12T18:31:06,801 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-12T18:31:06,801 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:31:06,802 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data1/current/BP-1633596070-172.17.0.3-1731436252766 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:06,803 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data2/current/BP-1633596070-172.17.0.3-1731436252766 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:06,803 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:31:06,813 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:06,816 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:31:06,817 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:31:06,817 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:31:06,817 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:31:06,818 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@743e6fa6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:31:06,818 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2002436c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:31:06,861 WARN [Thread-1332 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T18:31:06,864 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf42f4fc68a53887a with lease ID 0x60d6d414249e67e: from storage DS-034a2b91-6a9f-494c-8b05-711f90ff0a49 node DatanodeRegistration(127.0.0.1:44551, datanodeUuid=1cfcea85-3d35-4012-b30c-0dc74e9b5ecb, infoPort=45557, infoSecurePort=0, ipcPort=45799, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:06,864 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf42f4fc68a53887a with lease ID 0x60d6d414249e67e: from storage DS-10baee32-e635-436b-940d-dbd6219c1dea node DatanodeRegistration(127.0.0.1:44551, datanodeUuid=1cfcea85-3d35-4012-b30c-0dc74e9b5ecb, infoPort=45557, infoSecurePort=0, ipcPort=45799, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:06,933 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@213496de{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/java.io.tmpdir/jetty-localhost-35313-hadoop-hdfs-3_4_1-tests_jar-_-any-7420104114812057597/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:06,933 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@676c988c{HTTP/1.1, (http/1.1)}{localhost:35313} 2024-11-12T18:31:06,933 INFO [Time-limited test {}] server.Server(415): Started @162695ms 2024-11-12T18:31:06,935 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-12T18:31:07,029 WARN [Thread-1363 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T18:31:07,032 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x753139a07a7f730e with lease ID 0x60d6d414249e67f: from storage DS-849e365b-d65f-4a32-9702-e9a8839d490e node DatanodeRegistration(127.0.0.1:36667, datanodeUuid=15be3847-062d-4564-9b3f-a38032210215, infoPort=42833, infoSecurePort=0, ipcPort=37533, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:07,032 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x753139a07a7f730e with lease ID 0x60d6d414249e67f: from storage DS-77c47bb0-3a31-433d-83fa-6c92210b73fb node DatanodeRegistration(127.0.0.1:36667, datanodeUuid=15be3847-062d-4564-9b3f-a38032210215, infoPort=42833, infoSecurePort=0, ipcPort=37533, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:07,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:07,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:07,957 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-12T18:31:07,960 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-12T18:31:07,962 ERROR [FSHLog-0-hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c-prefix:9911683f163c,42983,1731436253666 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39159,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:31:07,962 WARN [FSHLog-0-hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c-prefix:9911683f163c,42983,1731436253666 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39159,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:07,962 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 9911683f163c%2C42983%2C1731436253666:(num 1731436254054) roll requested 2024-11-12T18:31:07,962 INFO [regionserver/9911683f163c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C42983%2C1731436253666.1731436267962 2024-11-12T18:31:07,976 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436254054 newFile=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 2024-11-12T18:31:07,977 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:07,977 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:07,977 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:07,978 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:07,978 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:07,978 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436254054 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 2024-11-12T18:31:07,981 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39159,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:31:07,981 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39159,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:07,981 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436254054 2024-11-12T18:31:07,981 WARN [IPC Server handler 0 on default port 40969 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436254054 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-12T18:31:07,982 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436254054 after 1ms 2024-11-12T18:31:07,985 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45557:45557),(127.0.0.1/127.0.0.1:42833:42833)] 2024-11-12T18:31:07,985 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436254054 is not closed yet, will try archiving it next time 2024-11-12T18:31:08,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:08,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:08,864 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-12T18:31:09,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:09,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:09,989 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-12T18:31:10,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:10,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:11,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:11,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:11,983 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436254054 after 4001ms 2024-11-12T18:31:11,992 WARN [ResponseProcessor for block BP-1633596070-172.17.0.3-1731436252766:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1633596070-172.17.0.3-1731436252766:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1633596070-172.17.0.3-1731436252766:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:36667,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:11,992 WARN [DataStreamer for file /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 block BP-1633596070-172.17.0.3-1731436252766:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1633596070-172.17.0.3-1731436252766:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44551,DS-034a2b91-6a9f-494c-8b05-711f90ff0a49,DISK], DatanodeInfoWithStorage[127.0.0.1:36667,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36667,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]) is bad. 
2024-11-12T18:31:11,992 WARN [PacketResponder: BP-1633596070-172.17.0.3-1731436252766:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36667] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:11,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-786663096_22 at /127.0.0.1:46234 [Receiving block BP-1633596070-172.17.0.3-1731436252766:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:44551:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46234 dst: /127.0.0.1:44551 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:11,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-786663096_22 at /127.0.0.1:38314 [Receiving block BP-1633596070-172.17.0.3-1731436252766:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38314 dst: /127.0.0.1:36667 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T18:31:11,994 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@213496de{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:11,994 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@676c988c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:31:11,995 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:31:11,995 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2002436c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:31:11,995 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@743e6fa6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir/,STOPPED} 2024-11-12T18:31:11,996 WARN [BP-1633596070-172.17.0.3-1731436252766 heartbeating to localhost/127.0.0.1:40969 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:31:11,996 WARN [BP-1633596070-172.17.0.3-1731436252766 heartbeating to localhost/127.0.0.1:40969 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1633596070-172.17.0.3-1731436252766 (Datanode Uuid 15be3847-062d-4564-9b3f-a38032210215) service to localhost/127.0.0.1:40969 2024-11-12T18:31:11,996 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-12T18:31:11,996 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:31:11,997 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data1/current/BP-1633596070-172.17.0.3-1731436252766 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:11,997 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data2/current/BP-1633596070-172.17.0.3-1731436252766 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:11,997 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:31:12,037 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:12,041 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:31:12,042 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:31:12,042 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:31:12,042 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T18:31:12,043 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2bfc4559{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:31:12,043 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bf07770{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:31:12,158 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@674cebb1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/java.io.tmpdir/jetty-localhost-35737-hadoop-hdfs-3_4_1-tests_jar-_-any-17753147177578319044/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:12,159 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5fc0dbac{HTTP/1.1, (http/1.1)}{localhost:35737} 2024-11-12T18:31:12,159 INFO [Time-limited test {}] server.Server(415): Started @167921ms 2024-11-12T18:31:12,161 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:31:12,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:12,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:12,187 WARN [ResponseProcessor for block BP-1633596070-172.17.0.3-1731436252766:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1633596070-172.17.0.3-1731436252766:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:12,187 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-786663096_22 at /127.0.0.1:46244 [Receiving block BP-1633596070-172.17.0.3-1731436252766:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:44551:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46244 dst: /127.0.0.1:44551 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T18:31:12,190 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@298442da{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:12,190 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6898429b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:31:12,190 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:31:12,190 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@232b7e3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:31:12,191 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47fb2b3a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir/,STOPPED} 2024-11-12T18:31:12,192 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-12T18:31:12,192 WARN [BP-1633596070-172.17.0.3-1731436252766 heartbeating to localhost/127.0.0.1:40969 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:31:12,192 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:31:12,192 WARN [BP-1633596070-172.17.0.3-1731436252766 heartbeating to localhost/127.0.0.1:40969 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1633596070-172.17.0.3-1731436252766 (Datanode Uuid 1cfcea85-3d35-4012-b30c-0dc74e9b5ecb) service to localhost/127.0.0.1:40969 2024-11-12T18:31:12,193 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data3/current/BP-1633596070-172.17.0.3-1731436252766 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:12,193 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data4/current/BP-1633596070-172.17.0.3-1731436252766 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:12,193 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:31:12,203 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:12,207 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:31:12,207 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:31:12,207 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:31:12,208 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:31:12,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@eed25d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:31:12,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@775ad417{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:31:12,259 WARN [Thread-1406 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T18:31:12,262 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xecb47a8c9f437d85 with lease ID 0x60d6d414249e680: from storage DS-849e365b-d65f-4a32-9702-e9a8839d490e node DatanodeRegistration(127.0.0.1:43127, datanodeUuid=15be3847-062d-4564-9b3f-a38032210215, infoPort=46865, infoSecurePort=0, ipcPort=44327, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:12,262 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xecb47a8c9f437d85 with lease ID 0x60d6d414249e680: from storage DS-77c47bb0-3a31-433d-83fa-6c92210b73fb node DatanodeRegistration(127.0.0.1:43127, datanodeUuid=15be3847-062d-4564-9b3f-a38032210215, infoPort=46865, infoSecurePort=0, ipcPort=44327, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:12,341 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@66ac2bb4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/java.io.tmpdir/jetty-localhost-41955-hadoop-hdfs-3_4_1-tests_jar-_-any-10405586379135386638/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:12,342 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@76915c89{HTTP/1.1, (http/1.1)}{localhost:41955} 2024-11-12T18:31:12,342 INFO [Time-limited test {}] server.Server(415): Started @168104ms 2024-11-12T18:31:12,344 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-12T18:31:12,446 WARN [Thread-1437 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T18:31:12,449 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb1fbae82d9ebb62 with lease ID 0x60d6d414249e681: from storage DS-034a2b91-6a9f-494c-8b05-711f90ff0a49 node DatanodeRegistration(127.0.0.1:36081, datanodeUuid=1cfcea85-3d35-4012-b30c-0dc74e9b5ecb, infoPort=33013, infoSecurePort=0, ipcPort=33529, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:12,449 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb1fbae82d9ebb62 with lease ID 0x60d6d414249e681: from storage DS-10baee32-e635-436b-940d-dbd6219c1dea node DatanodeRegistration(127.0.0.1:36081, datanodeUuid=1cfcea85-3d35-4012-b30c-0dc74e9b5ecb, infoPort=33013, infoSecurePort=0, ipcPort=33529, storageInfo=lv=-57;cid=testClusterID;nsid=1655988901;c=1731436252766), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-12T18:31:13,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:13,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:13,370 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-12T18:31:13,372 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-12T18:31:13,374 ERROR [FSHLog-0-hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c-prefix:9911683f163c,42983,1731436253666 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44551,DS-034a2b91-6a9f-494c-8b05-711f90ff0a49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:31:13,374 WARN [FSHLog-0-hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c-prefix:9911683f163c,42983,1731436253666 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44551,DS-034a2b91-6a9f-494c-8b05-711f90ff0a49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:13,374 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 9911683f163c%2C42983%2C1731436253666:(num 1731436267962) roll requested 2024-11-12T18:31:13,374 INFO [regionserver/9911683f163c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C42983%2C1731436253666.1731436273374 2024-11-12T18:31:13,380 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 newFile=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436273374 2024-11-12T18:31:13,380 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:13,380 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:13,380 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:13,380 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:13,380 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:13,380 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436273374 2024-11-12T18:31:13,381 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44551,DS-034a2b91-6a9f-494c-8b05-711f90ff0a49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:31:13,381 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44551,DS-034a2b91-6a9f-494c-8b05-711f90ff0a49,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:13,381 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 2024-11-12T18:31:13,381 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33013:33013),(127.0.0.1/127.0.0.1:46865:46865)] 2024-11-12T18:31:13,381 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 is not closed yet, will try archiving it next time 2024-11-12T18:31:13,381 WARN [IPC Server handler 3 on default port 40969 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-12T18:31:13,382 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 after 0ms 2024-11-12T18:31:14,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:14,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:14,262 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-12T18:31:15,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:15,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:15,383 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C42983%2C1731436253666.1731436275383 2024-11-12T18:31:15,390 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436273374 newFile=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 2024-11-12T18:31:15,390 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:15,390 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:15,390 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:15,390 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:15,390 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:15,391 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436273374 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 2024-11-12T18:31:15,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43127 is added to blk_1073741838_1019 (size=1264) 2024-11-12T18:31:15,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36081 is added to blk_1073741838_1019 (size=1264) 2024-11-12T18:31:15,393 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 is not closed yet, will try archiving it next time 2024-11-12T18:31:15,396 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46865:46865),(127.0.0.1/127.0.0.1:33013:33013)] 2024-11-12T18:31:15,396 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 is not closed yet, will try archiving it next time 2024-11-12T18:31:15,397 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436254054 2024-11-12T18:31:15,397 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436254054 2024-11-12T18:31:15,397 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436254054 after 0ms 2024-11-12T18:31:15,397 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436254054 2024-11-12T18:31:15,408 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731436254960/Put/vlen=218/seqid=0] 2024-11-12T18:31:15,409 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731436264625/Put/vlen=1045/seqid=0] 2024-11-12T18:31:15,409 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436254054 2024-11-12T18:31:15,409 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 2024-11-12T18:31:15,409 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 2024-11-12T18:31:15,409 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 after 0ms 2024-11-12T18:31:15,409 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 2024-11-12T18:31:15,417 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731436267961/Put/vlen=1045/seqid=0] 2024-11-12T18:31:15,418 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731436269990/Put/vlen=1045/seqid=0] 2024-11-12T18:31:15,418 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 2024-11-12T18:31:15,418 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436273374 2024-11-12T18:31:15,418 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436273374 2024-11-12T18:31:15,418 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436273374 after 0ms 2024-11-12T18:31:15,418 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436273374 2024-11-12T18:31:15,424 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731436273373/Put/vlen=1045/seqid=0] 2024-11-12T18:31:15,424 
DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 2024-11-12T18:31:15,424 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 2024-11-12T18:31:15,425 WARN [IPC Server handler 1 on default port 40969 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-12T18:31:15,425 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 after 1ms 2024-11-12T18:31:16,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:16,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:16,262 WARN [ResponseProcessor for block BP-1633596070-172.17.0.3-1731436252766:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1633596070-172.17.0.3-1731436252766:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:16,262 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_412045516_22 at /127.0.0.1:51614 [Receiving block BP-1633596070-172.17.0.3-1731436252766:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:43127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51614 dst: /127.0.0.1:43127 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:43127 remote=/127.0.0.1:51614]. Total timeout mills is 60000, 59127 millis timeout left. 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:16,262 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_412045516_22 at /127.0.0.1:33264 [Receiving block BP-1633596070-172.17.0.3-1731436252766:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36081:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33264 dst: /127.0.0.1:36081 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:16,263 WARN [DataStreamer for file /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 block BP-1633596070-172.17.0.3-1731436252766:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1633596070-172.17.0.3-1731436252766:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43127,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK], DatanodeInfoWithStorage[127.0.0.1:36081,DS-034a2b91-6a9f-494c-8b05-711f90ff0a49,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43127,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]) is bad. 2024-11-12T18:31:16,264 WARN [DataStreamer for file /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 block BP-1633596070-172.17.0.3-1731436252766:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1633596070-172.17.0.3-1731436252766:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:16,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43127 is added to blk_1073741839_1022 (size=85) 2024-11-12T18:31:16,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36081 is added to blk_1073741839_1022 (size=85) 2024-11-12T18:31:17,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:17,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:17,383 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436267962 after 4002ms 2024-11-12T18:31:18,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:18,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:19,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:19,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:19,426 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 after 4002ms 2024-11-12T18:31:19,426 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 2024-11-12T18:31:19,430 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 2024-11-12T18:31:19,431 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d37ab61228f529a0c0c1733f5f2ded2c 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-12T18:31:19,431 ERROR [FSHLog-0-hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c-prefix:9911683f163c,42983,1731436253666 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1633596070-172.17.0.3-1731436252766:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:31:19,431 WARN [FSHLog-0-hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c-prefix:9911683f163c,42983,1731436253666 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1633596070-172.17.0.3-1731436252766:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:31:19,432 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 9911683f163c%2C42983%2C1731436253666:(num 1731436275383) roll requested 2024-11-12T18:31:19,432 INFO [regionserver/9911683f163c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C42983%2C1731436253666.1731436279432 2024-11-12T18:31:19,442 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 newFile=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436279432 2024-11-12T18:31:19,442 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:19,442 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:19,442 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:19,442 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:19,443 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:19,443 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436279432 2024-11-12T18:31:19,443 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1633596070-172.17.0.3-1731436252766:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:19,443 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1633596070-172.17.0.3-1731436252766:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:19,444 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 2024-11-12T18:31:19,444 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 after 0ms 2024-11-12T18:31:19,449 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.1731436275383 to hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/oldWALs/9911683f163c%2C42983%2C1731436253666.1731436275383 2024-11-12T18:31:19,450 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33013:33013),(127.0.0.1/127.0.0.1:46865:46865)] 2024-11-12T18:31:19,475 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d37ab61228f529a0c0c1733f5f2ded2c/.tmp/info/69922c3257fd429cb8100cb9aabe6981 is 1080, key is row1002/info:/1731436264625/Put/seqid=0 2024-11-12T18:31:19,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36081 is added to blk_1073741841_1024 (size=9270) 2024-11-12T18:31:19,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43127 is added to blk_1073741841_1024 (size=9270) 2024-11-12T18:31:19,482 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d37ab61228f529a0c0c1733f5f2ded2c/.tmp/info/69922c3257fd429cb8100cb9aabe6981 2024-11-12T18:31:19,490 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d37ab61228f529a0c0c1733f5f2ded2c/.tmp/info/69922c3257fd429cb8100cb9aabe6981 as hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d37ab61228f529a0c0c1733f5f2ded2c/info/69922c3257fd429cb8100cb9aabe6981 2024-11-12T18:31:19,497 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d37ab61228f529a0c0c1733f5f2ded2c/info/69922c3257fd429cb8100cb9aabe6981, entries=4, sequenceid=8, filesize=9.1 K 2024-11-12T18:31:19,498 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for d37ab61228f529a0c0c1733f5f2ded2c in 68ms, sequenceid=8, compaction requested=false 2024-11-12T18:31:19,498 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for d37ab61228f529a0c0c1733f5f2ded2c: 2024-11-12T18:31:19,498 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-12T18:31:19,499 ERROR [FSHLog-0-hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c-prefix:9911683f163c,42983,1731436253666.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39159,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:19,499 WARN [FSHLog-0-hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c-prefix:9911683f163c,42983,1731436253666.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39159,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:31:19,499 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 9911683f163c%2C42983%2C1731436253666.meta:.meta(num 1731436254445) roll requested 2024-11-12T18:31:19,499 INFO [regionserver/9911683f163c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C42983%2C1731436253666.meta.1731436279499.meta 2024-11-12T18:31:19,506 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:19,506 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:19,506 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:19,506 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:19,506 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:19,506 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.meta.1731436254445.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.meta.1731436279499.meta 2024-11-12T18:31:19,507 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39159,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:19,507 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39159,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:31:19,507 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.meta.1731436254445.meta 2024-11-12T18:31:19,508 WARN [IPC Server handler 1 on default port 40969 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.meta.1731436254445.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1014 2024-11-12T18:31:19,508 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.meta.1731436254445.meta after 1ms 2024-11-12T18:31:19,510 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46865:46865),(127.0.0.1/127.0.0.1:33013:33013)] 2024-11-12T18:31:19,510 DEBUG [regionserver/9911683f163c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.meta.1731436254445.meta is not closed yet, will try archiving it next time 2024-11-12T18:31:19,528 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/.tmp/info/740fd1c7d45241d2bba468d9d0f710cf is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c./info:regioninfo/1731436254965/Put/seqid=0 2024-11-12T18:31:19,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43127 is added to blk_1073741843_1027 (size=7125) 2024-11-12T18:31:19,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36081 is added to blk_1073741843_1027 (size=7125) 2024-11-12T18:31:19,535 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/.tmp/info/740fd1c7d45241d2bba468d9d0f710cf 2024-11-12T18:31:19,561 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/.tmp/ns/4009a83131c5477fa15bbeb674368960 is 43, key is default/ns:d/1731436254487/Put/seqid=0 2024-11-12T18:31:19,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36081 is added to blk_1073741844_1028 (size=5153) 2024-11-12T18:31:19,575 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/.tmp/ns/4009a83131c5477fa15bbeb674368960 2024-11-12T18:31:19,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43127 is added to blk_1073741844_1028 (size=5153) 2024-11-12T18:31:19,607 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/.tmp/table/8d3f5998382f4058ac61943c40d65aee is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731436254977/Put/seqid=0 2024-11-12T18:31:19,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36081 is added to blk_1073741845_1029 (size=5438) 2024-11-12T18:31:19,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43127 is added to blk_1073741845_1029 (size=5438) 2024-11-12T18:31:19,619 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/.tmp/table/8d3f5998382f4058ac61943c40d65aee 2024-11-12T18:31:19,627 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/.tmp/info/740fd1c7d45241d2bba468d9d0f710cf as hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/info/740fd1c7d45241d2bba468d9d0f710cf 2024-11-12T18:31:19,633 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/info/740fd1c7d45241d2bba468d9d0f710cf, entries=10, sequenceid=11, filesize=7.0 K 2024-11-12T18:31:19,634 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/.tmp/ns/4009a83131c5477fa15bbeb674368960 as hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/ns/4009a83131c5477fa15bbeb674368960 2024-11-12T18:31:19,642 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/ns/4009a83131c5477fa15bbeb674368960, entries=2, sequenceid=11, filesize=5.0 K 2024-11-12T18:31:19,643 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/.tmp/table/8d3f5998382f4058ac61943c40d65aee as hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/table/8d3f5998382f4058ac61943c40d65aee 2024-11-12T18:31:19,650 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/table/8d3f5998382f4058ac61943c40d65aee, entries=2, sequenceid=11, filesize=5.3 K 2024-11-12T18:31:19,651 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 153ms, sequenceid=11, compaction requested=false 2024-11-12T18:31:19,651 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 
2024-11-12T18:31:19,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-12T18:31:19,658 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T18:31:19,658 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:31:19,658 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:19,659 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-12T18:31:19,659 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-12T18:31:19,659 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-12T18:31:19,659 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=187518992, stopped=false 2024-11-12T18:31:19,659 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=9911683f163c,45187,1731436253621 2024-11-12T18:31:19,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:19,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:19,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:19,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:19,661 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:31:19,661 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T18:31:19,662 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:19,662 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:19,662 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) 
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:31:19,662 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:19,663 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9911683f163c,42983,1731436253666' ***** 2024-11-12T18:31:19,663 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T18:31:19,663 INFO [RS:0;9911683f163c:42983 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T18:31:19,663 INFO [RS:0;9911683f163c:42983 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T18:31:19,663 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T18:31:19,663 INFO [RS:0;9911683f163c:42983 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T18:31:19,663 INFO [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(3091): Received CLOSE for d37ab61228f529a0c0c1733f5f2ded2c 2024-11-12T18:31:19,664 INFO [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(959): stopping server 9911683f163c,42983,1731436253666 2024-11-12T18:31:19,664 INFO [RS:0;9911683f163c:42983 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:31:19,664 INFO [RS:0;9911683f163c:42983 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;9911683f163c:42983. 
2024-11-12T18:31:19,664 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d37ab61228f529a0c0c1733f5f2ded2c, disabling compactions & flushes 2024-11-12T18:31:19,664 DEBUG [RS:0;9911683f163c:42983 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:31:19,664 DEBUG [RS:0;9911683f163c:42983 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:19,664 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c. 2024-11-12T18:31:19,664 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c. 2024-11-12T18:31:19,664 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c. after waiting 0 ms 2024-11-12T18:31:19,664 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c. 2024-11-12T18:31:19,664 INFO [RS:0;9911683f163c:42983 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T18:31:19,664 INFO [RS:0;9911683f163c:42983 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T18:31:19,664 INFO [RS:0;9911683f163c:42983 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-12T18:31:19,664 INFO [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-12T18:31:19,670 INFO [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-12T18:31:19,670 DEBUG [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(1325): Online Regions={d37ab61228f529a0c0c1733f5f2ded2c=TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c., 1588230740=hbase:meta,,1.1588230740} 2024-11-12T18:31:19,670 DEBUG [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d37ab61228f529a0c0c1733f5f2ded2c 2024-11-12T18:31:19,670 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:31:19,670 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:31:19,670 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:31:19,670 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:31:19,670 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:31:19,677 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-12T18:31:19,677 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T18:31:19,677 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:31:19,678 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436279670Running coprocessor pre-close hooks at 1731436279670Disabling compacts and flushes for region at 1731436279670Disabling writes for close at 1731436279670Writing region close event to WAL at 1731436279672 (+2 ms)Running coprocessor post-close hooks at 1731436279677 (+5 ms)Closed at 1731436279677 2024-11-12T18:31:19,678 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-12T18:31:19,681 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/data/default/TestLogRolling-testLogRollOnPipelineRestart/d37ab61228f529a0c0c1733f5f2ded2c/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-12T18:31:19,682 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c. 2024-11-12T18:31:19,682 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d37ab61228f529a0c0c1733f5f2ded2c: Waiting for close lock at 1731436279664Running coprocessor pre-close hooks at 1731436279664Disabling compacts and flushes for region at 1731436279664Disabling writes for close at 1731436279664Writing region close event to WAL at 1731436279670 (+6 ms)Running coprocessor post-close hooks at 1731436279682 (+12 ms)Closed at 1731436279682 2024-11-12T18:31:19,682 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731436254602.d37ab61228f529a0c0c1733f5f2ded2c. 2024-11-12T18:31:19,870 INFO [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(976): stopping server 9911683f163c,42983,1731436253666; all regions closed. 2024-11-12T18:31:19,871 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:19,871 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:19,871 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:19,871 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:19,872 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:19,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36081 is added to blk_1073741842_1025 (size=825) 2024-11-12T18:31:19,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43127 is added to blk_1073741842_1025 (size=825) 2024-11-12T18:31:19,918 INFO [regionserver/9911683f163c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-12T18:31:19,918 INFO [regionserver/9911683f163c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-12T18:31:19,919 INFO [regionserver/9911683f163c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:31:20,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:20,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:21,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:21,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:22,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:22,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:23,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:23,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:23,452 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-12T18:31:23,509 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.meta.1731436254445.meta after 4002ms 2024-11-12T18:31:23,513 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/WALs/9911683f163c,42983,1731436253666/9911683f163c%2C42983%2C1731436253666.meta.1731436254445.meta to hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/oldWALs/9911683f163c%2C42983%2C1731436253666.meta.1731436254445.meta 2024-11-12T18:31:23,523 DEBUG [RS:0;9911683f163c:42983 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/oldWALs 2024-11-12T18:31:23,523 INFO [RS:0;9911683f163c:42983 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C42983%2C1731436253666.meta:.meta(num 1731436279499) 2024-11-12T18:31:23,524 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:23,524 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:23,525 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:23,525 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:23,525 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:23,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43127 is added to blk_1073741840_1023 (size=1162) 2024-11-12T18:31:23,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36081 is added to blk_1073741840_1023 (size=1162) 2024-11-12T18:31:23,540 DEBUG [RS:0;9911683f163c:42983 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/oldWALs 2024-11-12T18:31:23,540 INFO [RS:0;9911683f163c:42983 {}] wal.AbstractFSWAL(1259): Closed 
WAL: FSHLog 9911683f163c%2C42983%2C1731436253666:(num 1731436279432) 2024-11-12T18:31:23,540 DEBUG [RS:0;9911683f163c:42983 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:23,540 INFO [RS:0;9911683f163c:42983 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:31:23,540 INFO [RS:0;9911683f163c:42983 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:31:23,540 INFO [RS:0;9911683f163c:42983 {}] hbase.ChoreService(370): Chore service for: regionserver/9911683f163c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-12T18:31:23,540 INFO [RS:0;9911683f163c:42983 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:31:23,541 INFO [RS:0;9911683f163c:42983 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42983 2024-11-12T18:31:23,542 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T18:31:23,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9911683f163c,42983,1731436253666 2024-11-12T18:31:23,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:31:23,545 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9911683f163c,42983,1731436253666] 2024-11-12T18:31:23,546 INFO [RS:0;9911683f163c:42983 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:31:23,547 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9911683f163c,42983,1731436253666 already deleted, retry=false 2024-11-12T18:31:23,547 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9911683f163c,42983,1731436253666 expired; onlineServers=0 2024-11-12T18:31:23,547 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '9911683f163c,45187,1731436253621' ***** 2024-11-12T18:31:23,547 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T18:31:23,547 INFO [M:0;9911683f163c:45187 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:31:23,547 INFO [M:0;9911683f163c:45187 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:31:23,547 DEBUG [M:0;9911683f163c:45187 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T18:31:23,547 DEBUG [M:0;9911683f163c:45187 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T18:31:23,547 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436253835 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436253835,5,FailOnTimeoutGroup] 2024-11-12T18:31:23,547 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436253835 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436253835,5,FailOnTimeoutGroup] 2024-11-12T18:31:23,548 INFO 
[M:0;9911683f163c:45187 {}] hbase.ChoreService(370): Chore service for: master/9911683f163c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-12T18:31:23,548 INFO [M:0;9911683f163c:45187 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:31:23,548 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-12T18:31:23,549 DEBUG [M:0;9911683f163c:45187 {}] master.HMaster(1795): Stopping service threads 2024-11-12T18:31:23,549 INFO [M:0;9911683f163c:45187 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T18:31:23,549 INFO [M:0;9911683f163c:45187 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:31:23,549 INFO [M:0;9911683f163c:45187 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T18:31:23,549 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-12T18:31:23,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T18:31:23,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:23,551 DEBUG [M:0;9911683f163c:45187 {}] zookeeper.ZKUtil(347): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-12T18:31:23,551 WARN [M:0;9911683f163c:45187 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-12T18:31:23,552 INFO [M:0;9911683f163c:45187 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/.lastflushedseqids 2024-11-12T18:31:23,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36081 is added to blk_1073741846_1030 (size=130) 2024-11-12T18:31:23,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43127 is added to blk_1073741846_1030 (size=130) 2024-11-12T18:31:23,571 INFO [M:0;9911683f163c:45187 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-12T18:31:23,571 INFO [M:0;9911683f163c:45187 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-12T18:31:23,571 DEBUG [M:0;9911683f163c:45187 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:31:23,571 INFO [M:0;9911683f163c:45187 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:23,571 DEBUG [M:0;9911683f163c:45187 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-12T18:31:23,571 DEBUG [M:0;9911683f163c:45187 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:31:23,571 DEBUG [M:0;9911683f163c:45187 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:23,572 INFO [M:0;9911683f163c:45187 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-12T18:31:23,572 ERROR [FSHLog-0-hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData-prefix:9911683f163c,45187,1731436253621 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39159,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:23,572 WARN [FSHLog-0-hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData-prefix:9911683f163c,45187,1731436253621 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39159,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:31:23,572 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 9911683f163c%2C45187%2C1731436253621:(num 1731436253759) roll requested 2024-11-12T18:31:23,573 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C45187%2C1731436253621.1731436283572 2024-11-12T18:31:23,578 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:23,578 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:23,578 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:23,578 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:23,578 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:23,578 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/WALs/9911683f163c,45187,1731436253621/9911683f163c%2C45187%2C1731436253621.1731436253759 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/WALs/9911683f163c,45187,1731436253621/9911683f163c%2C45187%2C1731436253621.1731436283572 2024-11-12T18:31:23,579 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39159,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-12T18:31:23,579 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39159,DS-849e365b-d65f-4a32-9702-e9a8839d490e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-12T18:31:23,579 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/WALs/9911683f163c,45187,1731436253621/9911683f163c%2C45187%2C1731436253621.1731436253759 2024-11-12T18:31:23,579 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33013:33013),(127.0.0.1/127.0.0.1:46865:46865)] 2024-11-12T18:31:23,579 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/WALs/9911683f163c,45187,1731436253621/9911683f163c%2C45187%2C1731436253621.1731436253759 is not closed yet, will try archiving it next time 2024-11-12T18:31:23,579 WARN [IPC Server handler 1 on default port 40969 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/WALs/9911683f163c,45187,1731436253621/9911683f163c%2C45187%2C1731436253621.1731436253759 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-12T18:31:23,580 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/WALs/9911683f163c,45187,1731436253621/9911683f163c%2C45187%2C1731436253621.1731436253759 after 0ms 2024-11-12T18:31:23,598 DEBUG [M:0;9911683f163c:45187 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cb4fa635b85848f0b517f64421cb56cb is 82, key is hbase:meta,,1/info:regioninfo/1731436254471/Put/seqid=0 2024-11-12T18:31:23,602 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-12T18:31:23,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43127 is added to blk_1073741848_1033 (size=5672) 2024-11-12T18:31:23,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36081 is added to blk_1073741848_1033 (size=5672) 2024-11-12T18:31:23,614 INFO [M:0;9911683f163c:45187 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cb4fa635b85848f0b517f64421cb56cb 2024-11-12T18:31:23,644 DEBUG [M:0;9911683f163c:45187 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/51e934ba5c28425886294fff70618357 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731436254982/Put/seqid=0 2024-11-12T18:31:23,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:31:23,649 INFO [RS:0;9911683f163c:42983 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:31:23,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42983-0x10035429c430001, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:31:23,649 INFO [RS:0;9911683f163c:42983 {}] regionserver.HRegionServer(1031): Exiting; stopping=9911683f163c,42983,1731436253666; zookeeper connection closed. 
2024-11-12T18:31:23,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36081 is added to blk_1073741849_1034 (size=6117) 2024-11-12T18:31:23,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43127 is added to blk_1073741849_1034 (size=6117) 2024-11-12T18:31:23,650 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@37a0ab27 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@37a0ab27 2024-11-12T18:31:23,650 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-12T18:31:23,650 INFO [M:0;9911683f163c:45187 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/51e934ba5c28425886294fff70618357 2024-11-12T18:31:23,672 DEBUG [M:0;9911683f163c:45187 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9d1f6035c42e4b8586d92a34f44983c5 is 69, key is 9911683f163c,42983,1731436253666/rs:state/1731436253906/Put/seqid=0 2024-11-12T18:31:23,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36081 is added to blk_1073741850_1035 (size=5156) 2024-11-12T18:31:23,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43127 is added to blk_1073741850_1035 (size=5156) 2024-11-12T18:31:23,682 INFO [M:0;9911683f163c:45187 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9d1f6035c42e4b8586d92a34f44983c5 2024-11-12T18:31:23,702 DEBUG [M:0;9911683f163c:45187 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3f59a043939544f192f80ae6a048e406 is 52, key is load_balancer_on/state:d/1731436254597/Put/seqid=0 2024-11-12T18:31:23,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36081 is added to blk_1073741851_1036 (size=5056) 2024-11-12T18:31:23,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43127 is added to blk_1073741851_1036 (size=5056) 2024-11-12T18:31:23,708 INFO [M:0;9911683f163c:45187 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3f59a043939544f192f80ae6a048e406 2024-11-12T18:31:23,714 DEBUG [M:0;9911683f163c:45187 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cb4fa635b85848f0b517f64421cb56cb as 
hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cb4fa635b85848f0b517f64421cb56cb 2024-11-12T18:31:23,719 INFO [M:0;9911683f163c:45187 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cb4fa635b85848f0b517f64421cb56cb, entries=8, sequenceid=56, filesize=5.5 K 2024-11-12T18:31:23,720 DEBUG [M:0;9911683f163c:45187 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/51e934ba5c28425886294fff70618357 as hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/51e934ba5c28425886294fff70618357 2024-11-12T18:31:23,725 INFO [M:0;9911683f163c:45187 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/51e934ba5c28425886294fff70618357, entries=6, sequenceid=56, filesize=6.0 K 2024-11-12T18:31:23,726 DEBUG [M:0;9911683f163c:45187 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9d1f6035c42e4b8586d92a34f44983c5 as hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9d1f6035c42e4b8586d92a34f44983c5 2024-11-12T18:31:23,731 INFO [M:0;9911683f163c:45187 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9d1f6035c42e4b8586d92a34f44983c5, entries=1, sequenceid=56, filesize=5.0 K 2024-11-12T18:31:23,732 DEBUG [M:0;9911683f163c:45187 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3f59a043939544f192f80ae6a048e406 as hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3f59a043939544f192f80ae6a048e406 2024-11-12T18:31:23,737 INFO [M:0;9911683f163c:45187 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3f59a043939544f192f80ae6a048e406, entries=1, sequenceid=56, filesize=4.9 K 2024-11-12T18:31:23,738 INFO [M:0;9911683f163c:45187 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=56, compaction requested=false 2024-11-12T18:31:23,740 INFO [M:0;9911683f163c:45187 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-12T18:31:23,740 DEBUG [M:0;9911683f163c:45187 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436283571Disabling compacts and flushes for region at 1731436283571Disabling writes for close at 1731436283571Obtaining lock to block concurrent updates at 1731436283572 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731436283572Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1731436283572Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731436283580 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731436283580Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731436283597 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731436283597Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731436283620 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731436283643 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731436283643Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731436283656 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731436283671 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731436283671Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731436283687 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731436283702 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731436283702Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54ed4ed3: reopening flushed file at 1731436283713 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11d059c5: reopening flushed file at 1731436283719 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7702fbaa: reopening flushed file at 1731436283725 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2099dfd5: reopening flushed file at 1731436283731 (+6 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=56, compaction requested=false at 1731436283738 (+7 ms)Writing region close event to WAL at 1731436283740 (+2 ms)Closed at 1731436283740 2024-11-12T18:31:23,740 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:23,740 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:23,740 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:23,740 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:23,741 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:31:23,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43127 is added to blk_1073741847_1031 (size=757) 2024-11-12T18:31:23,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36081 is added to blk_1073741847_1031 (size=757) 2024-11-12T18:31:24,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:24,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:24,678 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:24,678 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:24,690 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:24,690 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:24,690 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:24,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:24,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:24,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:24,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:24,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:24,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:24,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:24,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:24,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:25,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:25,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:25,203 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-12T18:31:25,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:25,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:25,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:25,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:25,219 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:25,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:25,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:25,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:25,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:25,221 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:25,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:25,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:25,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:25,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:26,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:26,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:26,452 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-12T18:31:27,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:27,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:27,580 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/WALs/9911683f163c,45187,1731436253621/9911683f163c%2C45187%2C1731436253621.1731436253759 after 4001ms 2024-11-12T18:31:27,581 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/WALs/9911683f163c,45187,1731436253621/9911683f163c%2C45187%2C1731436253621.1731436253759 to hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/oldWALs/9911683f163c%2C45187%2C1731436253621.1731436253759 2024-11-12T18:31:27,586 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/MasterData/oldWALs/9911683f163c%2C45187%2C1731436253621.1731436253759 to hdfs://localhost:40969/user/jenkins/test-data/01da9bb8-4d6b-353d-3c5c-18c52171492c/oldWALs/9911683f163c%2C45187%2C1731436253621.1731436253759$masterlocalwal$ 2024-11-12T18:31:27,586 INFO [M:0;9911683f163c:45187 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-12T18:31:27,586 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T18:31:27,586 INFO [M:0;9911683f163c:45187 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45187 2024-11-12T18:31:27,587 INFO [M:0;9911683f163c:45187 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:31:27,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:31:27,689 INFO [M:0;9911683f163c:45187 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:31:27,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45187-0x10035429c430000, quorum=127.0.0.1:52165, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:31:27,691 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@66ac2bb4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:27,692 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@76915c89{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:31:27,692 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:31:27,692 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@775ad417{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:31:27,692 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@eed25d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir/,STOPPED} 2024-11-12T18:31:27,693 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T18:31:27,693 WARN [BP-1633596070-172.17.0.3-1731436252766 heartbeating to localhost/127.0.0.1:40969 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:31:27,693 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:31:27,693 WARN [BP-1633596070-172.17.0.3-1731436252766 heartbeating to localhost/127.0.0.1:40969 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1633596070-172.17.0.3-1731436252766 (Datanode Uuid 1cfcea85-3d35-4012-b30c-0dc74e9b5ecb) service to localhost/127.0.0.1:40969 2024-11-12T18:31:27,694 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data3/current/BP-1633596070-172.17.0.3-1731436252766 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:27,694 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data4/current/BP-1633596070-172.17.0.3-1731436252766 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:27,694 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:31:27,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@674cebb1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:27,696 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5fc0dbac{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:31:27,697 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:31:27,697 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bf07770{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:31:27,697 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2bfc4559{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir/,STOPPED} 2024-11-12T18:31:27,698 WARN [BP-1633596070-172.17.0.3-1731436252766 heartbeating to localhost/127.0.0.1:40969 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:31:27,698 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T18:31:27,698 WARN [BP-1633596070-172.17.0.3-1731436252766 heartbeating to localhost/127.0.0.1:40969 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1633596070-172.17.0.3-1731436252766 (Datanode Uuid 15be3847-062d-4564-9b3f-a38032210215) service to localhost/127.0.0.1:40969 2024-11-12T18:31:27,698 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:31:27,699 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data1/current/BP-1633596070-172.17.0.3-1731436252766 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:27,699 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/cluster_83f5a921-1b52-4b7d-f60a-fb97fead6060/data/data2/current/BP-1633596070-172.17.0.3-1731436252766 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:31:27,699 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:31:27,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@73a5fbf0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:31:27,705 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@27c14f1b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:31:27,705 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:31:27,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5864970e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:31:27,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22146658{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir/,STOPPED} 2024-11-12T18:31:27,712 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-12T18:31:27,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-12T18:31:27,737 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 153) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:40969 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40969 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:40969 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40969 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40969 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:40969 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40969 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:40969 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=455 (was 454) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=172 (was 189), ProcessCount=11 (was 11), AvailableMemoryMB=6220 (was 6132) - AvailableMemoryMB LEAK? 
- 2024-11-12T18:31:27,745 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=172, ProcessCount=11, AvailableMemoryMB=6220 2024-11-12T18:31:27,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-12T18:31:27,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.log.dir so I do NOT create it in target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b 2024-11-12T18:31:27,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c33f20e-98bd-342c-5035-89ec4e3c72d6/hadoop.tmp.dir so I do NOT create it in target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b 2024-11-12T18:31:27,746 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/cluster_abd5de15-dadd-f4e0-5d64-6879e7382ac2, deleteOnExit=true 2024-11-12T18:31:27,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-12T18:31:27,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/test.cache.data in system properties and HBase conf 2024-11-12T18:31:27,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/hadoop.tmp.dir in system properties and HBase conf 2024-11-12T18:31:27,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/hadoop.log.dir in system properties and HBase conf 2024-11-12T18:31:27,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-12T18:31:27,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-12T18:31:27,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-12T18:31:27,746 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-12T18:31:27,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:31:27,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:31:27,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-12T18:31:27,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:31:27,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-12T18:31:27,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-12T18:31:27,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:31:27,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:31:27,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-12T18:31:27,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/nfs.dump.dir in system properties and HBase conf 2024-11-12T18:31:27,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/java.io.tmpdir in system properties and HBase conf 2024-11-12T18:31:27,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:31:27,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-12T18:31:27,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-12T18:31:27,760 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-12T18:31:27,829 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:27,834 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:31:27,835 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:31:27,835 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:31:27,835 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:31:27,836 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:27,836 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6db0a9fa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:31:27,836 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b2ab1bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:31:27,950 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@555de183{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/java.io.tmpdir/jetty-localhost-42337-hadoop-hdfs-3_4_1-tests_jar-_-any-15776707982923391621/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:31:27,950 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@796376f7{HTTP/1.1, (http/1.1)}{localhost:42337} 2024-11-12T18:31:27,950 INFO [Time-limited test {}] server.Server(415): Started @183712ms 2024-11-12T18:31:27,963 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-12T18:31:28,013 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:28,017 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:31:28,017 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:31:28,017 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:31:28,017 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:31:28,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3edbaed2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:31:28,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5218abd2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:31:28,139 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6aa18e30{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/java.io.tmpdir/jetty-localhost-45451-hadoop-hdfs-3_4_1-tests_jar-_-any-6120533146219452558/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:28,140 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37be9df5{HTTP/1.1, (http/1.1)}{localhost:45451} 2024-11-12T18:31:28,140 INFO [Time-limited test {}] server.Server(415): Started @183902ms 2024-11-12T18:31:28,141 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:31:28,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:28,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:28,203 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:31:28,206 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:31:28,209 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:31:28,209 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:31:28,209 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:31:28,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e776489{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:31:28,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10bb53c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:31:28,284 WARN [Thread-1631 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/cluster_abd5de15-dadd-f4e0-5d64-6879e7382ac2/data/data1/current/BP-6681107-172.17.0.3-1731436287778/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:28,290 WARN [Thread-1632 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/cluster_abd5de15-dadd-f4e0-5d64-6879e7382ac2/data/data2/current/BP-6681107-172.17.0.3-1731436287778/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:28,319 WARN [Thread-1610 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T18:31:28,323 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xef1ad2913903232e with lease ID 0x99732998bba550be: Processing first storage report for DS-e249d6ee-f916-4325-a519-69a8d83202b2 from datanode DatanodeRegistration(127.0.0.1:44871, datanodeUuid=451c84e1-fb9f-42b8-8992-ef6cc6267549, infoPort=33303, infoSecurePort=0, ipcPort=42495, storageInfo=lv=-57;cid=testClusterID;nsid=389510577;c=1731436287778) 2024-11-12T18:31:28,323 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xef1ad2913903232e with lease ID 0x99732998bba550be: from storage DS-e249d6ee-f916-4325-a519-69a8d83202b2 node DatanodeRegistration(127.0.0.1:44871, datanodeUuid=451c84e1-fb9f-42b8-8992-ef6cc6267549, infoPort=33303, infoSecurePort=0, ipcPort=42495, storageInfo=lv=-57;cid=testClusterID;nsid=389510577;c=1731436287778), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:28,324 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xef1ad2913903232e with lease ID 0x99732998bba550be: Processing first storage report for DS-555b9766-998d-4603-bc86-b281880cd102 from datanode DatanodeRegistration(127.0.0.1:44871, datanodeUuid=451c84e1-fb9f-42b8-8992-ef6cc6267549, infoPort=33303, infoSecurePort=0, ipcPort=42495, storageInfo=lv=-57;cid=testClusterID;nsid=389510577;c=1731436287778) 2024-11-12T18:31:28,324 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xef1ad2913903232e with lease ID 0x99732998bba550be: from storage DS-555b9766-998d-4603-bc86-b281880cd102 node DatanodeRegistration(127.0.0.1:44871, datanodeUuid=451c84e1-fb9f-42b8-8992-ef6cc6267549, infoPort=33303, infoSecurePort=0, ipcPort=42495, storageInfo=lv=-57;cid=testClusterID;nsid=389510577;c=1731436287778), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:28,373 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7273ffd9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/java.io.tmpdir/jetty-localhost-46767-hadoop-hdfs-3_4_1-tests_jar-_-any-7811196022258521040/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:31:28,374 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5501be24{HTTP/1.1, (http/1.1)}{localhost:46767} 2024-11-12T18:31:28,374 INFO [Time-limited test {}] server.Server(415): Started @184135ms 2024-11-12T18:31:28,375 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-12T18:31:28,491 WARN [Thread-1658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/cluster_abd5de15-dadd-f4e0-5d64-6879e7382ac2/data/data4/current/BP-6681107-172.17.0.3-1731436287778/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:28,491 WARN [Thread-1657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/cluster_abd5de15-dadd-f4e0-5d64-6879e7382ac2/data/data3/current/BP-6681107-172.17.0.3-1731436287778/current, will proceed with Du for space computation calculation, 2024-11-12T18:31:28,510 WARN [Thread-1646 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T18:31:28,512 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdcedef2490bc2ae3 with lease ID 0x99732998bba550bf: Processing first storage report for DS-27cf117f-f63a-48bd-b8b0-c3e6c226e983 from datanode DatanodeRegistration(127.0.0.1:43357, datanodeUuid=19019242-c273-42e8-8959-7efd37ec4e04, infoPort=37587, infoSecurePort=0, ipcPort=38995, storageInfo=lv=-57;cid=testClusterID;nsid=389510577;c=1731436287778) 2024-11-12T18:31:28,512 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdcedef2490bc2ae3 with lease ID 0x99732998bba550bf: from storage DS-27cf117f-f63a-48bd-b8b0-c3e6c226e983 node DatanodeRegistration(127.0.0.1:43357, datanodeUuid=19019242-c273-42e8-8959-7efd37ec4e04, infoPort=37587, infoSecurePort=0, ipcPort=38995, storageInfo=lv=-57;cid=testClusterID;nsid=389510577;c=1731436287778), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:28,512 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdcedef2490bc2ae3 with lease ID 0x99732998bba550bf: Processing first storage report for DS-400d10c8-66f1-4a69-9e34-73c6f39be257 from datanode DatanodeRegistration(127.0.0.1:43357, datanodeUuid=19019242-c273-42e8-8959-7efd37ec4e04, infoPort=37587, infoSecurePort=0, ipcPort=38995, storageInfo=lv=-57;cid=testClusterID;nsid=389510577;c=1731436287778) 2024-11-12T18:31:28,513 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdcedef2490bc2ae3 with lease ID 0x99732998bba550bf: from storage DS-400d10c8-66f1-4a69-9e34-73c6f39be257 node DatanodeRegistration(127.0.0.1:43357, datanodeUuid=19019242-c273-42e8-8959-7efd37ec4e04, infoPort=37587, infoSecurePort=0, ipcPort=38995, storageInfo=lv=-57;cid=testClusterID;nsid=389510577;c=1731436287778), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:31:28,528 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b 2024-11-12T18:31:28,537 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/cluster_abd5de15-dadd-f4e0-5d64-6879e7382ac2/zookeeper_0, clientPort=55274, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/cluster_abd5de15-dadd-f4e0-5d64-6879e7382ac2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/cluster_abd5de15-dadd-f4e0-5d64-6879e7382ac2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-12T18:31:28,539 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55274 2024-11-12T18:31:28,539 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:28,541 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:28,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:31:28,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:31:28,559 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1 with version=8 2024-11-12T18:31:28,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/hbase-staging 2024-11-12T18:31:28,561 INFO [Time-limited test {}] client.ConnectionUtils(128): master/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:31:28,561 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:28,561 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:28,561 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:31:28,561 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:28,561 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:31:28,561 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-12T18:31:28,562 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:31:28,562 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46127 2024-11-12T18:31:28,564 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46127 connecting to ZooKeeper ensemble=127.0.0.1:55274 2024-11-12T18:31:28,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:461270x0, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:31:28,571 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46127-0x100354324b40000 connected 2024-11-12T18:31:28,596 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:28,597 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:28,601 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:28,601 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1, hbase.cluster.distributed=false 2024-11-12T18:31:28,602 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:31:28,604 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46127 2024-11-12T18:31:28,605 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46127 2024-11-12T18:31:28,606 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46127 2024-11-12T18:31:28,608 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46127 2024-11-12T18:31:28,609 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46127 2024-11-12T18:31:28,626 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:31:28,626 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:28,626 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:28,626 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:31:28,626 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:31:28,626 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:31:28,626 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T18:31:28,627 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:31:28,627 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37059 2024-11-12T18:31:28,629 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37059 connecting to ZooKeeper ensemble=127.0.0.1:55274 2024-11-12T18:31:28,630 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:28,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:28,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:370590x0, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:31:28,638 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37059-0x100354324b40001 connected 2024-11-12T18:31:28,638 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:31:28,638 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T18:31:28,642 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T18:31:28,643 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T18:31:28,644 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:31:28,646 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37059 2024-11-12T18:31:28,646 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37059 2024-11-12T18:31:28,647 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37059 2024-11-12T18:31:28,650 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37059 2024-11-12T18:31:28,651 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37059 
2024-11-12T18:31:28,665 DEBUG [M:0;9911683f163c:46127 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9911683f163c:46127 2024-11-12T18:31:28,665 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/9911683f163c,46127,1731436288561 2024-11-12T18:31:28,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:28,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:28,668 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9911683f163c,46127,1731436288561 2024-11-12T18:31:28,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T18:31:28,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:28,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:28,670 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T18:31:28,671 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9911683f163c,46127,1731436288561 from backup master directory 2024-11-12T18:31:28,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9911683f163c,46127,1731436288561 2024-11-12T18:31:28,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:28,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:31:28,672 WARN [master/9911683f163c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-12T18:31:28,672 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9911683f163c,46127,1731436288561 2024-11-12T18:31:28,678 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/hbase.id] with ID: 1a7c822f-4594-4a29-9e60-ebcbc7ee7901 2024-11-12T18:31:28,678 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/.tmp/hbase.id 2024-11-12T18:31:28,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:31:28,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:31:28,686 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/.tmp/hbase.id]:[hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/hbase.id] 2024-11-12T18:31:28,697 INFO [master/9911683f163c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:28,698 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-12T18:31:28,699 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-12T18:31:28,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:28,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:28,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:31:28,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:31:28,708 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:31:28,709 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T18:31:28,709 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:31:28,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:31:28,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:31:28,718 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store 2024-11-12T18:31:28,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:31:28,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:31:28,727 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:28,727 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:31:28,727 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:28,727 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:28,727 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:31:28,727 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:31:28,727 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-12T18:31:28,727 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436288727Disabling compacts and flushes for region at 1731436288727Disabling writes for close at 1731436288727Writing region close event to WAL at 1731436288727Closed at 1731436288727 2024-11-12T18:31:28,728 WARN [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/.initializing 2024-11-12T18:31:28,728 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/WALs/9911683f163c,46127,1731436288561 2024-11-12T18:31:28,731 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C46127%2C1731436288561, suffix=, logDir=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/WALs/9911683f163c,46127,1731436288561, archiveDir=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/oldWALs, maxLogs=10 2024-11-12T18:31:28,731 INFO [master/9911683f163c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C46127%2C1731436288561.1731436288731 2024-11-12T18:31:28,737 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/WALs/9911683f163c,46127,1731436288561/9911683f163c%2C46127%2C1731436288561.1731436288731 2024-11-12T18:31:28,740 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37587:37587),(127.0.0.1/127.0.0.1:33303:33303)] 2024-11-12T18:31:28,744 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:31:28,745 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:28,745 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:28,745 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:28,746 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:28,748 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T18:31:28,748 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:28,749 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:28,749 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:28,750 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T18:31:28,750 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:28,750 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:31:28,750 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:28,751 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T18:31:28,751 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:28,752 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:31:28,752 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:28,753 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T18:31:28,753 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:28,753 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:31:28,753 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:28,754 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:28,755 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:28,756 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:28,756 DEBUG [master/9911683f163c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:28,757 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T18:31:28,758 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:31:28,760 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:31:28,760 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817264, jitterRate=0.03920562565326691}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T18:31:28,761 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731436288745Initializing all the Stores at 1731436288746 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436288746Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436288746Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436288746Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436288746Cleaning up temporary data from old regions at 1731436288756 (+10 ms)Region opened successfully at 1731436288761 (+5 ms) 2024-11-12T18:31:28,761 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T18:31:28,764 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c872c1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:31:28,765 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-12T18:31:28,766 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T18:31:28,766 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T18:31:28,766 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T18:31:28,766 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-12T18:31:28,767 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-12T18:31:28,767 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T18:31:28,769 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-12T18:31:28,769 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T18:31:28,771 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-12T18:31:28,771 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T18:31:28,771 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T18:31:28,772 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-12T18:31:28,773 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T18:31:28,774 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T18:31:28,775 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-12T18:31:28,775 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T18:31:28,776 DEBUG 
[master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T18:31:28,778 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T18:31:28,779 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T18:31:28,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:28,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:31:28,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:28,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:28,781 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=9911683f163c,46127,1731436288561, sessionid=0x100354324b40000, setting cluster-up flag (Was=false) 2024-11-12T18:31:28,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:28,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:28,787 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T18:31:28,788 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,46127,1731436288561 2024-11-12T18:31:28,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:28,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:28,795 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T18:31:28,796 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,46127,1731436288561 2024-11-12T18:31:28,797 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-12T18:31:28,799 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-12T18:31:28,799 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-12T18:31:28,799 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-12T18:31:28,800 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9911683f163c,46127,1731436288561 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T18:31:28,801 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:31:28,801 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:31:28,801 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:31:28,801 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:31:28,801 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9911683f163c:0, corePoolSize=10, maxPoolSize=10 2024-11-12T18:31:28,801 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:28,801 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:31:28,801 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9911683f163c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-12T18:31:28,802 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731436318802 2024-11-12T18:31:28,802 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T18:31:28,802 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T18:31:28,802 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T18:31:28,802 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T18:31:28,802 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T18:31:28,802 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T18:31:28,802 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:28,803 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T18:31:28,803 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T18:31:28,803 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:31:28,803 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T18:31:28,803 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-12T18:31:28,803 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T18:31:28,803 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T18:31:28,803 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436288803,5,FailOnTimeoutGroup] 2024-11-12T18:31:28,803 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436288803,5,FailOnTimeoutGroup] 2024-11-12T18:31:28,803 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:28,803 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-12T18:31:28,803 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:28,804 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:28,804 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:28,804 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T18:31:28,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741831_1007 (size=1321) 2024-11-12T18:31:28,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741831_1007 (size=1321) 2024-11-12T18:31:28,811 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-12T18:31:28,811 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1 2024-11-12T18:31:28,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:31:28,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:31:28,817 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:28,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:31:28,819 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:31:28,819 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:28,820 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:28,820 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:31:28,821 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:31:28,821 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:28,821 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:28,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:31:28,822 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:31:28,823 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:28,823 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:28,823 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:31:28,824 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:31:28,824 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:28,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:28,825 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:31:28,825 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740 2024-11-12T18:31:28,826 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740 2024-11-12T18:31:28,827 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:31:28,827 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:31:28,827 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-12T18:31:28,828 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:31:28,830 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:31:28,830 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=818980, jitterRate=0.04138803482055664}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T18:31:28,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731436288817Initializing all the Stores at 1731436288818 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436288818Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436288818Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436288818Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436288818Cleaning up temporary data from old regions at 1731436288827 (+9 ms)Region opened successfully at 1731436288831 (+4 ms) 2024-11-12T18:31:28,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:31:28,831 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:31:28,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:31:28,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:31:28,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:31:28,831 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:31:28,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436288831Disabling compacts and flushes for region at 1731436288831Disabling writes for close at 1731436288831Writing region close 
event to WAL at 1731436288831Closed at 1731436288831 2024-11-12T18:31:28,833 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:31:28,833 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-12T18:31:28,833 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-12T18:31:28,835 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:31:28,836 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-12T18:31:28,852 INFO [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(746): ClusterId : 1a7c822f-4594-4a29-9e60-ebcbc7ee7901 2024-11-12T18:31:28,853 DEBUG [RS:0;9911683f163c:37059 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T18:31:28,859 DEBUG [RS:0;9911683f163c:37059 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T18:31:28,859 DEBUG [RS:0;9911683f163c:37059 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T18:31:28,861 DEBUG [RS:0;9911683f163c:37059 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T18:31:28,862 DEBUG [RS:0;9911683f163c:37059 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fda83ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:31:28,874 DEBUG [RS:0;9911683f163c:37059 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9911683f163c:37059 2024-11-12T18:31:28,874 INFO [RS:0;9911683f163c:37059 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T18:31:28,874 INFO [RS:0;9911683f163c:37059 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T18:31:28,874 DEBUG [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-12T18:31:28,875 INFO [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,46127,1731436288561 with port=37059, startcode=1731436288626 2024-11-12T18:31:28,875 DEBUG [RS:0;9911683f163c:37059 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T18:31:28,877 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38077, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T18:31:28,877 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46127 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9911683f163c,37059,1731436288626 2024-11-12T18:31:28,878 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46127 {}] master.ServerManager(517): Registering regionserver=9911683f163c,37059,1731436288626 2024-11-12T18:31:28,879 DEBUG [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1 2024-11-12T18:31:28,879 DEBUG [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42641 2024-11-12T18:31:28,879 DEBUG [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T18:31:28,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:31:28,882 DEBUG [RS:0;9911683f163c:37059 {}] zookeeper.ZKUtil(111): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9911683f163c,37059,1731436288626 2024-11-12T18:31:28,882 WARN [RS:0;9911683f163c:37059 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T18:31:28,882 INFO [RS:0;9911683f163c:37059 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:31:28,882 DEBUG [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/WALs/9911683f163c,37059,1731436288626 2024-11-12T18:31:28,882 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9911683f163c,37059,1731436288626] 2024-11-12T18:31:28,885 INFO [RS:0;9911683f163c:37059 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T18:31:28,887 INFO [RS:0;9911683f163c:37059 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T18:31:28,887 INFO [RS:0;9911683f163c:37059 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T18:31:28,887 INFO [RS:0;9911683f163c:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-12T18:31:28,887 INFO [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T18:31:28,888 INFO [RS:0;9911683f163c:37059 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T18:31:28,888 INFO [RS:0;9911683f163c:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:28,888 DEBUG [RS:0;9911683f163c:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:28,888 DEBUG [RS:0;9911683f163c:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:28,888 DEBUG [RS:0;9911683f163c:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:28,888 DEBUG [RS:0;9911683f163c:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:28,888 DEBUG [RS:0;9911683f163c:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:28,888 DEBUG [RS:0;9911683f163c:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:31:28,888 DEBUG [RS:0;9911683f163c:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:28,888 DEBUG [RS:0;9911683f163c:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:28,888 DEBUG [RS:0;9911683f163c:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:28,888 DEBUG [RS:0;9911683f163c:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:28,888 DEBUG [RS:0;9911683f163c:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:28,889 DEBUG [RS:0;9911683f163c:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:31:28,889 DEBUG [RS:0;9911683f163c:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:31:28,889 DEBUG [RS:0;9911683f163c:37059 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:31:28,890 INFO [RS:0;9911683f163c:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-12T18:31:28,890 INFO [RS:0;9911683f163c:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:28,890 INFO [RS:0;9911683f163c:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:28,890 INFO [RS:0;9911683f163c:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:28,890 INFO [RS:0;9911683f163c:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:28,890 INFO [RS:0;9911683f163c:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,37059,1731436288626-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:31:28,905 INFO [RS:0;9911683f163c:37059 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T18:31:28,906 INFO [RS:0;9911683f163c:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,37059,1731436288626-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:28,906 INFO [RS:0;9911683f163c:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:28,906 INFO [RS:0;9911683f163c:37059 {}] regionserver.Replication(171): 9911683f163c,37059,1731436288626 started 2024-11-12T18:31:28,920 INFO [RS:0;9911683f163c:37059 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:28,920 INFO [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(1482): Serving as 9911683f163c,37059,1731436288626, RpcServer on 9911683f163c/172.17.0.3:37059, sessionid=0x100354324b40001 2024-11-12T18:31:28,921 DEBUG [RS:0;9911683f163c:37059 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T18:31:28,921 DEBUG [RS:0;9911683f163c:37059 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9911683f163c,37059,1731436288626 2024-11-12T18:31:28,921 DEBUG [RS:0;9911683f163c:37059 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,37059,1731436288626' 2024-11-12T18:31:28,921 DEBUG [RS:0;9911683f163c:37059 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T18:31:28,921 DEBUG [RS:0;9911683f163c:37059 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T18:31:28,922 DEBUG [RS:0;9911683f163c:37059 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T18:31:28,922 DEBUG [RS:0;9911683f163c:37059 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T18:31:28,922 DEBUG [RS:0;9911683f163c:37059 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9911683f163c,37059,1731436288626 2024-11-12T18:31:28,922 DEBUG [RS:0;9911683f163c:37059 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,37059,1731436288626' 2024-11-12T18:31:28,922 DEBUG [RS:0;9911683f163c:37059 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T18:31:28,922 DEBUG 
[RS:0;9911683f163c:37059 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T18:31:28,922 DEBUG [RS:0;9911683f163c:37059 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T18:31:28,922 INFO [RS:0;9911683f163c:37059 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T18:31:28,922 INFO [RS:0;9911683f163c:37059 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T18:31:28,986 WARN [9911683f163c:46127 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-12T18:31:29,024 INFO [RS:0;9911683f163c:37059 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C37059%2C1731436288626, suffix=, logDir=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/WALs/9911683f163c,37059,1731436288626, archiveDir=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/oldWALs, maxLogs=32 2024-11-12T18:31:29,025 INFO [RS:0;9911683f163c:37059 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C37059%2C1731436288626.1731436289025 2024-11-12T18:31:29,034 INFO [RS:0;9911683f163c:37059 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/WALs/9911683f163c,37059,1731436288626/9911683f163c%2C37059%2C1731436288626.1731436289025 2024-11-12T18:31:29,035 DEBUG [RS:0;9911683f163c:37059 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37587:37587),(127.0.0.1/127.0.0.1:33303:33303)] 2024-11-12T18:31:29,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:29,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:29,237 DEBUG [9911683f163c:46127 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-12T18:31:29,237 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9911683f163c,37059,1731436288626 2024-11-12T18:31:29,239 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,37059,1731436288626, state=OPENING 2024-11-12T18:31:29,240 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-12T18:31:29,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:29,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:31:29,242 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:31:29,243 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,37059,1731436288626}] 2024-11-12T18:31:29,243 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:29,243 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:29,396 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T18:31:29,399 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41577, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T18:31:29,404 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-12T18:31:29,404 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:31:29,406 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C37059%2C1731436288626.meta, suffix=.meta, logDir=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/WALs/9911683f163c,37059,1731436288626, archiveDir=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/oldWALs, maxLogs=32 2024-11-12T18:31:29,407 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C37059%2C1731436288626.meta.1731436289407.meta 2024-11-12T18:31:29,428 INFO 
[RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/WALs/9911683f163c,37059,1731436288626/9911683f163c%2C37059%2C1731436288626.meta.1731436289407.meta 2024-11-12T18:31:29,440 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33303:33303),(127.0.0.1/127.0.0.1:37587:37587)] 2024-11-12T18:31:29,444 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:31:29,444 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-12T18:31:29,444 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-12T18:31:29,445 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-12T18:31:29,445 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-12T18:31:29,445 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:29,445 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-12T18:31:29,445 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-12T18:31:29,452 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:31:29,453 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:31:29,453 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:29,454 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:29,454 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:31:29,455 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:31:29,455 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:29,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:29,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:31:29,457 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:31:29,457 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:29,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:29,458 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:31:29,458 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:31:29,459 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:29,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:31:29,459 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:31:29,460 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740 2024-11-12T18:31:29,462 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740 2024-11-12T18:31:29,463 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:31:29,463 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:31:29,464 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-12T18:31:29,466 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:31:29,467 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777934, jitterRate=-0.010806441307067871}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T18:31:29,467 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-12T18:31:29,468 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731436289445Writing region info on filesystem at 1731436289445Initializing all the Stores at 1731436289447 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436289447Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436289452 (+5 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436289452Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436289452Cleaning up temporary data from old regions at 1731436289463 (+11 ms)Running coprocessor post-open hooks at 1731436289467 (+4 ms)Region opened successfully at 1731436289468 (+1 ms) 2024-11-12T18:31:29,470 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731436289396 2024-11-12T18:31:29,474 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T18:31:29,474 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-12T18:31:29,475 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=9911683f163c,37059,1731436288626 2024-11-12T18:31:29,477 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,37059,1731436288626, state=OPEN 2024-11-12T18:31:29,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:31:29,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:31:29,482 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:29,482 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:31:29,482 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=9911683f163c,37059,1731436288626 2024-11-12T18:31:29,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T18:31:29,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,37059,1731436288626 in 240 msec 2024-11-12T18:31:29,491 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T18:31:29,491 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 654 msec 2024-11-12T18:31:29,492 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:31:29,492 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-12T18:31:29,494 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:31:29,494 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,37059,1731436288626, seqNum=-1] 2024-11-12T18:31:29,495 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:31:29,497 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42941, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:31:29,506 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 706 msec 2024-11-12T18:31:29,506 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731436289506, completionTime=-1 2024-11-12T18:31:29,506 INFO 
[master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-12T18:31:29,507 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-12T18:31:29,509 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-12T18:31:29,509 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731436349509 2024-11-12T18:31:29,509 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731436409509 2024-11-12T18:31:29,509 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-12T18:31:29,510 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,46127,1731436288561-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:29,510 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,46127,1731436288561-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:29,510 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,46127,1731436288561-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:29,510 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9911683f163c:46127, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:29,510 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:29,512 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:29,513 DEBUG [master/9911683f163c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-12T18:31:29,517 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.845sec 2024-11-12T18:31:29,517 INFO [master/9911683f163c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T18:31:29,517 INFO [master/9911683f163c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T18:31:29,518 INFO [master/9911683f163c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T18:31:29,518 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-12T18:31:29,518 INFO [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T18:31:29,518 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,46127,1731436288561-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:31:29,518 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,46127,1731436288561-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T18:31:29,523 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-12T18:31:29,523 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T18:31:29,523 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,46127,1731436288561-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:31:29,553 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cc09a8e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:31:29,553 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 9911683f163c,46127,-1 for getting cluster id 2024-11-12T18:31:29,553 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-12T18:31:29,555 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '1a7c822f-4594-4a29-9e60-ebcbc7ee7901' 2024-11-12T18:31:29,556 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-12T18:31:29,556 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "1a7c822f-4594-4a29-9e60-ebcbc7ee7901" 2024-11-12T18:31:29,556 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7aae9e36, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:31:29,556 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9911683f163c,46127,-1] 2024-11-12T18:31:29,557 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-12T18:31:29,557 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:31:29,558 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41578, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-12T18:31:29,559 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6932bc3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:31:29,559 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:31:29,561 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,37059,1731436288626, seqNum=-1] 2024-11-12T18:31:29,561 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:31:29,562 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35944, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:31:29,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=9911683f163c,46127,1731436288561 2024-11-12T18:31:29,564 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:31:29,567 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-12T18:31:29,568 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-12T18:31:29,569 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 9911683f163c,46127,1731436288561 2024-11-12T18:31:29,569 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@42b0320 2024-11-12T18:31:29,569 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T18:31:29,570 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41594, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T18:31:29,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-12T18:31:29,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-12T18:31:29,571 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:31:29,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-12T18:31:29,574 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T18:31:29,574 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:29,574 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-12T18:31:29,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:31:29,576 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T18:31:29,585 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-12T18:31:29,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741835_1011 (size=405) 2024-11-12T18:31:29,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741835_1011 (size=405) 2024-11-12T18:31:29,589 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c26f253b0964acda858fd848ea3d8359, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1 2024-11-12T18:31:29,596 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741836_1012 (size=88) 2024-11-12T18:31:29,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741836_1012 (size=88) 2024-11-12T18:31:29,598 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:29,598 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing c26f253b0964acda858fd848ea3d8359, disabling compactions & flushes 2024-11-12T18:31:29,598 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 2024-11-12T18:31:29,598 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 2024-11-12T18:31:29,598 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. after waiting 0 ms 2024-11-12T18:31:29,598 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 2024-11-12T18:31:29,598 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 2024-11-12T18:31:29,598 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for c26f253b0964acda858fd848ea3d8359: Waiting for close lock at 1731436289598Disabling compacts and flushes for region at 1731436289598Disabling writes for close at 1731436289598Writing region close event to WAL at 1731436289598Closed at 1731436289598 2024-11-12T18:31:29,600 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T18:31:29,600 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731436289600"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731436289600"}]},"ts":"1731436289600"} 2024-11-12T18:31:29,603 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-12T18:31:29,604 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T18:31:29,605 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731436289605"}]},"ts":"1731436289605"} 2024-11-12T18:31:29,607 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-12T18:31:29,608 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c26f253b0964acda858fd848ea3d8359, ASSIGN}] 2024-11-12T18:31:29,609 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c26f253b0964acda858fd848ea3d8359, ASSIGN 2024-11-12T18:31:29,611 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c26f253b0964acda858fd848ea3d8359, ASSIGN; state=OFFLINE, location=9911683f163c,37059,1731436288626; forceNewPlan=false, retain=false 2024-11-12T18:31:29,761 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c26f253b0964acda858fd848ea3d8359, regionState=OPENING, regionLocation=9911683f163c,37059,1731436288626 2024-11-12T18:31:29,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c26f253b0964acda858fd848ea3d8359, ASSIGN because future has completed 2024-11-12T18:31:29,766 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c26f253b0964acda858fd848ea3d8359, server=9911683f163c,37059,1731436288626}] 2024-11-12T18:31:29,924 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 
2024-11-12T18:31:29,924 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c26f253b0964acda858fd848ea3d8359, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:31:29,925 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling c26f253b0964acda858fd848ea3d8359 2024-11-12T18:31:29,925 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:31:29,925 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c26f253b0964acda858fd848ea3d8359 2024-11-12T18:31:29,925 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c26f253b0964acda858fd848ea3d8359 2024-11-12T18:31:29,926 INFO [StoreOpener-c26f253b0964acda858fd848ea3d8359-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c26f253b0964acda858fd848ea3d8359 2024-11-12T18:31:29,928 INFO [StoreOpener-c26f253b0964acda858fd848ea3d8359-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c26f253b0964acda858fd848ea3d8359 columnFamilyName info 2024-11-12T18:31:29,929 DEBUG [StoreOpener-c26f253b0964acda858fd848ea3d8359-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:31:29,929 INFO [StoreOpener-c26f253b0964acda858fd848ea3d8359-1 {}] regionserver.HStore(327): Store=c26f253b0964acda858fd848ea3d8359/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:31:29,929 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c26f253b0964acda858fd848ea3d8359 2024-11-12T18:31:29,930 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359 2024-11-12T18:31:29,930 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359 2024-11-12T18:31:29,931 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c26f253b0964acda858fd848ea3d8359 2024-11-12T18:31:29,931 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c26f253b0964acda858fd848ea3d8359 2024-11-12T18:31:29,933 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c26f253b0964acda858fd848ea3d8359 2024-11-12T18:31:29,938 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:31:29,938 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c26f253b0964acda858fd848ea3d8359; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=789569, jitterRate=0.003989577293395996}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-12T18:31:29,938 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c26f253b0964acda858fd848ea3d8359 2024-11-12T18:31:29,939 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c26f253b0964acda858fd848ea3d8359: Running coprocessor pre-open hook at 1731436289925Writing region info on filesystem at 1731436289925Initializing all the Stores at 1731436289926 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436289926Cleaning up temporary data from old regions at 1731436289931 (+5 ms)Running coprocessor post-open hooks at 1731436289938 (+7 ms)Region opened successfully at 1731436289939 (+1 ms) 2024-11-12T18:31:29,941 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359., pid=6, masterSystemTime=1731436289919 2024-11-12T18:31:29,944 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 2024-11-12T18:31:29,944 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 2024-11-12T18:31:29,945 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c26f253b0964acda858fd848ea3d8359, regionState=OPEN, openSeqNum=2, regionLocation=9911683f163c,37059,1731436288626 2024-11-12T18:31:29,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c26f253b0964acda858fd848ea3d8359, server=9911683f163c,37059,1731436288626 because future has completed 2024-11-12T18:31:29,952 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46127 {}] assignment.AssignmentManager(1535): Unable to acquire lock for regionNode state=OPEN, location=9911683f163c,37059,1731436288626, table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c26f253b0964acda858fd848ea3d8359. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-12T18:31:29,958 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-12T18:31:29,958 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c26f253b0964acda858fd848ea3d8359, server=9911683f163c,37059,1731436288626 in 187 msec 2024-11-12T18:31:29,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-12T18:31:29,963 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T18:31:29,963 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731436289963"}]},"ts":"1731436289963"} 2024-11-12T18:31:29,964 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c26f253b0964acda858fd848ea3d8359, ASSIGN in 350 msec 2024-11-12T18:31:29,966 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-12T18:31:29,968 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T18:31:29,971 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 397 msec 2024-11-12T18:31:30,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:30,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:31,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:31,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:32,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:32,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:33,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:33,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:34,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:34,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:34,947 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-12T18:31:34,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,964 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,964 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,964 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,964 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:31:34,976 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-12T18:31:34,976 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-12T18:31:35,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:35,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:36,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:36,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:37,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:37,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:38,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:38,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:39,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:39,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-12T18:31:39,585 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-12T18:31:39,585 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-12T18:31:39,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-12T18:31:39,617 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-12T18:31:39,617 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-12T18:31:39,620 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-12T18:31:39,620 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.
2024-11-12T18:31:39,623 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359., hostname=9911683f163c,37059,1731436288626, seqNum=2]
2024-11-12T18:31:39,630 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-12T18:31:39,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-12T18:31:39,636 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-12T18:31:39,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-12T18:31:39,638 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-12T18:31:39,639 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-12T18:31:39,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37059 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-12T18:31:39,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.
2024-11-12T18:31:39,800 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing c26f253b0964acda858fd848ea3d8359 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-12T18:31:39,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/d9cea1586ebd4eed8c0de6d15a3ee2af is 1080, key is row0001/info:/1731436299624/Put/seqid=0
2024-11-12T18:31:39,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741837_1013 (size=6033)
2024-11-12T18:31:39,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741837_1013 (size=6033)
2024-11-12T18:31:40,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-12T18:31:40,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-12T18:31:40,223 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/d9cea1586ebd4eed8c0de6d15a3ee2af
2024-11-12T18:31:40,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/d9cea1586ebd4eed8c0de6d15a3ee2af as hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/d9cea1586ebd4eed8c0de6d15a3ee2af
2024-11-12T18:31:40,236 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/d9cea1586ebd4eed8c0de6d15a3ee2af, entries=1, sequenceid=5, filesize=5.9 K
2024-11-12T18:31:40,237 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c26f253b0964acda858fd848ea3d8359 in 437ms, sequenceid=5, compaction requested=false
2024-11-12T18:31:40,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for c26f253b0964acda858fd848ea3d8359:
2024-11-12T18:31:40,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.
2024-11-12T18:31:40,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-12T18:31:40,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-12T18:31:40,244 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-12T18:31:40,244 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 603 msec 2024-11-12T18:31:40,247 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 614 msec 2024-11-12T18:31:41,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:41,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:42,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:42,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:43,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:43,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:44,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:49,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:49,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:49,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T18:31:49,727 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-12T18:31:49,730 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-12T18:31:49,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-12T18:31:49,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-12T18:31:49,733 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-12T18:31:49,734 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T18:31:49,734 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T18:31:49,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37059 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-12T18:31:49,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 
2024-11-12T18:31:49,888 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing c26f253b0964acda858fd848ea3d8359 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-12T18:31:49,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/b5a5c15a066e4ccea13d34a25d0db554 is 1080, key is row0002/info:/1731436309728/Put/seqid=0 2024-11-12T18:31:49,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741838_1014 (size=6033) 2024-11-12T18:31:49,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741838_1014 (size=6033) 2024-11-12T18:31:49,899 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/b5a5c15a066e4ccea13d34a25d0db554 2024-11-12T18:31:49,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/b5a5c15a066e4ccea13d34a25d0db554 as hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/b5a5c15a066e4ccea13d34a25d0db554 2024-11-12T18:31:49,910 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/b5a5c15a066e4ccea13d34a25d0db554, entries=1, sequenceid=9, filesize=5.9 K 2024-11-12T18:31:49,912 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c26f253b0964acda858fd848ea3d8359 in 23ms, sequenceid=9, compaction requested=false 2024-11-12T18:31:49,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for c26f253b0964acda858fd848ea3d8359: 2024-11-12T18:31:49,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 
2024-11-12T18:31:49,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-12T18:31:49,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-12T18:31:49,916 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-12T18:31:49,916 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 180 msec 2024-11-12T18:31:49,919 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 187 msec 2024-11-12T18:31:50,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:55,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:55,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:55,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 after 68054ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T18:31:55,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta after 68039ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T18:31:56,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:56,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:57,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:57,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:58,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:31:58,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:58,528 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-12T18:31:59,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:31:59,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-12T18:31:59,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-12T18:31:59,777 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-12T18:31:59,780 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C37059%2C1731436288626.1731436319780
2024-11-12T18:31:59,786 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-12T18:31:59,786 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-12T18:31:59,786 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-12T18:31:59,786 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-12T18:31:59,786 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-12T18:31:59,786 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/WALs/9911683f163c,37059,1731436288626/9911683f163c%2C37059%2C1731436288626.1731436289025 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/WALs/9911683f163c,37059,1731436288626/9911683f163c%2C37059%2C1731436288626.1731436319780
2024-11-12T18:31:59,787 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33303:33303),(127.0.0.1/127.0.0.1:37587:37587)]
2024-11-12T18:31:59,787 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/WALs/9911683f163c,37059,1731436288626/9911683f163c%2C37059%2C1731436288626.1731436289025 is not closed yet, will try archiving it next time
2024-11-12T18:31:59,788 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-12T18:31:59,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741833_1009 (size=5546)
2024-11-12T18:31:59,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741833_1009 (size=5546)
2024-11-12T18:31:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-12T18:31:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-12T18:31:59,790 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-12T18:31:59,791 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-12T18:31:59,791 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-12T18:31:59,944 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37059 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-12T18:31:59,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.
2024-11-12T18:31:59,945 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing c26f253b0964acda858fd848ea3d8359 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-12T18:31:59,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/21c59aa58efa474eb14b00f0bca77c89 is 1080, key is row0003/info:/1731436319779/Put/seqid=0
2024-11-12T18:31:59,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741840_1016 (size=6033)
2024-11-12T18:31:59,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741840_1016 (size=6033)
2024-11-12T18:31:59,955 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/21c59aa58efa474eb14b00f0bca77c89
2024-11-12T18:31:59,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/21c59aa58efa474eb14b00f0bca77c89 as hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/21c59aa58efa474eb14b00f0bca77c89
2024-11-12T18:31:59,967 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/21c59aa58efa474eb14b00f0bca77c89, entries=1, sequenceid=13, filesize=5.9 K
2024-11-12T18:31:59,968 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c26f253b0964acda858fd848ea3d8359 in 23ms, sequenceid=13, compaction requested=true
2024-11-12T18:31:59,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for c26f253b0964acda858fd848ea3d8359:
2024-11-12T18:31:59,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.
2024-11-12T18:31:59,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-12T18:31:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-12T18:31:59,973 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-12T18:31:59,973 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec
2024-11-12T18:31:59,975 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec
2024-11-12T18:32:00,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:00,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:01,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:01,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:02,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:02,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:03,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:03,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:04,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:04,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:05,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:05,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-12T18:32:06,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:06,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:07,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:07,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:08,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:08,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:09,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:09,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:09,523 INFO [master/9911683f163c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-12T18:32:09,523 INFO [master/9911683f163c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-12T18:32:09,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-12T18:32:09,857 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-12T18:32:09,857 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T18:32:09,858 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T18:32:09,858 DEBUG [Time-limited test {}] regionserver.HStore(1541): c26f253b0964acda858fd848ea3d8359/info is initiating minor compaction (all files) 2024-11-12T18:32:09,859 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T18:32:09,859 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:09,859 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of c26f253b0964acda858fd848ea3d8359/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 2024-11-12T18:32:09,859 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/d9cea1586ebd4eed8c0de6d15a3ee2af, hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/b5a5c15a066e4ccea13d34a25d0db554, hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/21c59aa58efa474eb14b00f0bca77c89] into tmpdir=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp, totalSize=17.7 K 2024-11-12T18:32:09,859 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting d9cea1586ebd4eed8c0de6d15a3ee2af, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731436299624 2024-11-12T18:32:09,860 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting b5a5c15a066e4ccea13d34a25d0db554, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731436309728 2024-11-12T18:32:09,860 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 21c59aa58efa474eb14b00f0bca77c89, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731436319779 2024-11-12T18:32:09,871 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): c26f253b0964acda858fd848ea3d8359#info#compaction#44 average throughput is unlimited, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T18:32:09,872 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/66843ecfa98f453fa7e58d0293c15083 is 1080, key is row0001/info:/1731436299624/Put/seqid=0 2024-11-12T18:32:09,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741841_1017 (size=8296) 2024-11-12T18:32:09,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741841_1017 (size=8296) 2024-11-12T18:32:09,884 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/66843ecfa98f453fa7e58d0293c15083 as hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/66843ecfa98f453fa7e58d0293c15083 2024-11-12T18:32:09,892 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c26f253b0964acda858fd848ea3d8359/info of c26f253b0964acda858fd848ea3d8359 into 66843ecfa98f453fa7e58d0293c15083(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T18:32:09,892 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for c26f253b0964acda858fd848ea3d8359: 2024-11-12T18:32:09,895 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C37059%2C1731436288626.1731436329895 2024-11-12T18:32:09,901 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:09,901 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:09,901 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:09,901 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:09,901 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:09,901 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/WALs/9911683f163c,37059,1731436288626/9911683f163c%2C37059%2C1731436288626.1731436319780 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/WALs/9911683f163c,37059,1731436288626/9911683f163c%2C37059%2C1731436288626.1731436329895 2024-11-12T18:32:09,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741839_1015 (size=2520) 2024-11-12T18:32:09,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741839_1015 (size=2520) 2024-11-12T18:32:09,905 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/WALs/9911683f163c,37059,1731436288626/9911683f163c%2C37059%2C1731436288626.1731436289025 to 
hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/oldWALs/9911683f163c%2C37059%2C1731436288626.1731436289025 2024-11-12T18:32:09,905 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37587:37587),(127.0.0.1/127.0.0.1:33303:33303)] 2024-11-12T18:32:09,906 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-12T18:32:09,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-12T18:32:09,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-12T18:32:09,909 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-12T18:32:09,910 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T18:32:09,910 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T18:32:10,063 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37059 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-12T18:32:10,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 
2024-11-12T18:32:10,063 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing c26f253b0964acda858fd848ea3d8359 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-12T18:32:10,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/6b1b9136afc84108ade463ba71fa9661 is 1080, key is row0000/info:/1731436329893/Put/seqid=0 2024-11-12T18:32:10,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741843_1019 (size=6033) 2024-11-12T18:32:10,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741843_1019 (size=6033) 2024-11-12T18:32:10,075 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/6b1b9136afc84108ade463ba71fa9661 2024-11-12T18:32:10,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/6b1b9136afc84108ade463ba71fa9661 as hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/6b1b9136afc84108ade463ba71fa9661 2024-11-12T18:32:10,085 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/6b1b9136afc84108ade463ba71fa9661, entries=1, sequenceid=18, filesize=5.9 K 2024-11-12T18:32:10,086 INFO [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c26f253b0964acda858fd848ea3d8359 in 23ms, sequenceid=18, compaction requested=false 2024-11-12T18:32:10,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for c26f253b0964acda858fd848ea3d8359: 2024-11-12T18:32:10,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 
2024-11-12T18:32:10,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-12T18:32:10,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-12T18:32:10,091 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-12T18:32:10,091 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec
2024-11-12T18:32:10,093 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 186 msec
2024-11-12T18:32:10,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:10,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:11,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:11,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:12,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:12,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:13,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:13,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:14,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:14,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:14,925 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c26f253b0964acda858fd848ea3d8359, had cached 0 bytes from a total of 14329
2024-11-12T18:32:15,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:15,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:16,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:16,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:17,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:17,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:18,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null
2024-11-12T18:32:18,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:19,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:19,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:32:19,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46127 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-12T18:32:19,937 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-12T18:32:19,940 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C37059%2C1731436288626.1731436339940 2024-11-12T18:32:19,946 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:19,946 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:19,946 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:19,946 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:19,946 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:19,947 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/WALs/9911683f163c,37059,1731436288626/9911683f163c%2C37059%2C1731436288626.1731436329895 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/WALs/9911683f163c,37059,1731436288626/9911683f163c%2C37059%2C1731436288626.1731436339940 2024-11-12T18:32:19,947 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33303:33303),(127.0.0.1/127.0.0.1:37587:37587)] 2024-11-12T18:32:19,947 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/WALs/9911683f163c,37059,1731436288626/9911683f163c%2C37059%2C1731436288626.1731436329895 is not closed yet, will try archiving it next time 2024-11-12T18:32:19,948 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/WALs/9911683f163c,37059,1731436288626/9911683f163c%2C37059%2C1731436288626.1731436319780 to hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/oldWALs/9911683f163c%2C37059%2C1731436288626.1731436319780 2024-11-12T18:32:19,948 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-12T18:32:19,948 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-12T18:32:19,948 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:32:19,948 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:32:19,948 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:32:19,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741842_1018 (size=2026) 2024-11-12T18:32:19,948 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): 
Shutting down HBase Cluster 2024-11-12T18:32:19,948 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1520150268, stopped=false 2024-11-12T18:32:19,948 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=9911683f163c,46127,1731436288561 2024-11-12T18:32:19,949 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-12T18:32:19,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741842_1018 (size=2026) 2024-11-12T18:32:19,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:32:19,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:32:19,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:32:19,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:32:19,950 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:32:19,950 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-12T18:32:19,951 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:32:19,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:32:19,951 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '9911683f163c,37059,1731436288626' ***** 2024-11-12T18:32:19,951 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T18:32:19,951 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:32:19,951 INFO [RS:0;9911683f163c:37059 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T18:32:19,951 INFO [RS:0;9911683f163c:37059 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T18:32:19,951 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:32:19,951 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T18:32:19,951 INFO [RS:0;9911683f163c:37059 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T18:32:19,951 INFO [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(3091): Received CLOSE for c26f253b0964acda858fd848ea3d8359 2024-11-12T18:32:19,952 INFO [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(959): stopping server 9911683f163c,37059,1731436288626 2024-11-12T18:32:19,952 INFO [RS:0;9911683f163c:37059 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:32:19,952 INFO [RS:0;9911683f163c:37059 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;9911683f163c:37059. 2024-11-12T18:32:19,953 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c26f253b0964acda858fd848ea3d8359, disabling compactions & flushes 2024-11-12T18:32:19,953 DEBUG [RS:0;9911683f163c:37059 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:32:19,953 DEBUG [RS:0;9911683f163c:37059 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:32:19,953 INFO 
[RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 2024-11-12T18:32:19,953 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 2024-11-12T18:32:19,953 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. after waiting 0 ms 2024-11-12T18:32:19,953 INFO [RS:0;9911683f163c:37059 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T18:32:19,953 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 2024-11-12T18:32:19,953 INFO [RS:0;9911683f163c:37059 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T18:32:19,953 INFO [RS:0;9911683f163c:37059 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T18:32:19,953 INFO [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-12T18:32:19,953 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing c26f253b0964acda858fd848ea3d8359 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-12T18:32:19,956 INFO [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-12T18:32:19,956 DEBUG [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(1325): Online Regions={c26f253b0964acda858fd848ea3d8359=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359., 1588230740=hbase:meta,,1.1588230740} 2024-11-12T18:32:19,956 DEBUG [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c26f253b0964acda858fd848ea3d8359 2024-11-12T18:32:19,957 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:32:19,957 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:32:19,957 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:32:19,957 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:32:19,957 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:32:19,957 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-12T18:32:19,959 DEBUG 
[RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/76216f2706ee452198ad4d0eee65cc6a is 1080, key is row0001/info:/1731436339938/Put/seqid=0 2024-11-12T18:32:19,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741845_1021 (size=6033) 2024-11-12T18:32:19,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741845_1021 (size=6033) 2024-11-12T18:32:19,966 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/76216f2706ee452198ad4d0eee65cc6a 2024-11-12T18:32:19,972 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/.tmp/info/76216f2706ee452198ad4d0eee65cc6a as hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/76216f2706ee452198ad4d0eee65cc6a 2024-11-12T18:32:19,978 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/76216f2706ee452198ad4d0eee65cc6a, entries=1, sequenceid=22, filesize=5.9 K 2024-11-12T18:32:19,979 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c26f253b0964acda858fd848ea3d8359 in 26ms, sequenceid=22, compaction requested=true 2024-11-12T18:32:19,979 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/d9cea1586ebd4eed8c0de6d15a3ee2af, hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/b5a5c15a066e4ccea13d34a25d0db554, hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/21c59aa58efa474eb14b00f0bca77c89] to archive 2024-11-12T18:32:19,980 DEBUG 
[StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-12T18:32:19,981 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/.tmp/info/ba4074416db94f188a25df27a641863d is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359./info:regioninfo/1731436289945/Put/seqid=0 2024-11-12T18:32:19,982 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/d9cea1586ebd4eed8c0de6d15a3ee2af to hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/d9cea1586ebd4eed8c0de6d15a3ee2af 2024-11-12T18:32:19,984 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/b5a5c15a066e4ccea13d34a25d0db554 to hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/b5a5c15a066e4ccea13d34a25d0db554 2024-11-12T18:32:19,985 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/21c59aa58efa474eb14b00f0bca77c89 to hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/info/21c59aa58efa474eb14b00f0bca77c89 2024-11-12T18:32:19,985 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=9911683f163c:46127 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-12T18:32:19,986 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [d9cea1586ebd4eed8c0de6d15a3ee2af=6033, b5a5c15a066e4ccea13d34a25d0db554=6033, 21c59aa58efa474eb14b00f0bca77c89=6033] 2024-11-12T18:32:19,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741846_1022 (size=7308) 2024-11-12T18:32:19,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741846_1022 (size=7308) 2024-11-12T18:32:19,991 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/.tmp/info/ba4074416db94f188a25df27a641863d 2024-11-12T18:32:19,993 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c26f253b0964acda858fd848ea3d8359/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-12T18:32:19,994 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 2024-11-12T18:32:19,994 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c26f253b0964acda858fd848ea3d8359: Waiting for close lock at 1731436339952Running coprocessor pre-close hooks at 1731436339953 (+1 ms)Disabling compacts and flushes for region at 1731436339953Disabling writes for close at 1731436339953Obtaining lock to block concurrent updates at 1731436339953Preparing flush snapshotting stores in c26f253b0964acda858fd848ea3d8359 at 1731436339953Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731436339953Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 
at 1731436339954 (+1 ms)Flushing c26f253b0964acda858fd848ea3d8359/info: creating writer at 1731436339954Flushing c26f253b0964acda858fd848ea3d8359/info: appending metadata at 1731436339958 (+4 ms)Flushing c26f253b0964acda858fd848ea3d8359/info: closing flushed file at 1731436339958Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7665cc24: reopening flushed file at 1731436339971 (+13 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c26f253b0964acda858fd848ea3d8359 in 26ms, sequenceid=22, compaction requested=true at 1731436339979 (+8 ms)Writing region close event to WAL at 1731436339989 (+10 ms)Running coprocessor post-close hooks at 1731436339994 (+5 ms)Closed at 1731436339994 2024-11-12T18:32:19,994 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731436289571.c26f253b0964acda858fd848ea3d8359. 2024-11-12T18:32:20,011 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/.tmp/ns/44d9f5f1f2f44f89afaec2dffc715220 is 43, key is default/ns:d/1731436289498/Put/seqid=0 2024-11-12T18:32:20,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741847_1023 (size=5153) 2024-11-12T18:32:20,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741847_1023 (size=5153) 2024-11-12T18:32:20,017 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/.tmp/ns/44d9f5f1f2f44f89afaec2dffc715220 2024-11-12T18:32:20,037 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/.tmp/table/adddbb5a503348389a3617d409910884 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731436289963/Put/seqid=0 2024-11-12T18:32:20,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741848_1024 (size=5508) 2024-11-12T18:32:20,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741848_1024 (size=5508) 2024-11-12T18:32:20,042 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/.tmp/table/adddbb5a503348389a3617d409910884 2024-11-12T18:32:20,048 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/.tmp/info/ba4074416db94f188a25df27a641863d as 
hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/info/ba4074416db94f188a25df27a641863d 2024-11-12T18:32:20,053 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/info/ba4074416db94f188a25df27a641863d, entries=10, sequenceid=11, filesize=7.1 K 2024-11-12T18:32:20,054 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/.tmp/ns/44d9f5f1f2f44f89afaec2dffc715220 as hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/ns/44d9f5f1f2f44f89afaec2dffc715220 2024-11-12T18:32:20,058 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/ns/44d9f5f1f2f44f89afaec2dffc715220, entries=2, sequenceid=11, filesize=5.0 K 2024-11-12T18:32:20,059 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/.tmp/table/adddbb5a503348389a3617d409910884 as hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/table/adddbb5a503348389a3617d409910884 2024-11-12T18:32:20,064 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/table/adddbb5a503348389a3617d409910884, entries=2, sequenceid=11, filesize=5.4 K 2024-11-12T18:32:20,066 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 108ms, sequenceid=11, compaction requested=false 2024-11-12T18:32:20,070 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-12T18:32:20,071 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T18:32:20,071 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:32:20,071 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436339956Running coprocessor pre-close hooks at 1731436339956Disabling compacts and flushes for region at 1731436339956Disabling writes for close at 1731436339957 (+1 ms)Obtaining lock to block concurrent updates at 1731436339957Preparing flush snapshotting stores in 1588230740 at 
1731436339957Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731436339957Flushing stores of hbase:meta,,1.1588230740 at 1731436339958 (+1 ms)Flushing 1588230740/info: creating writer at 1731436339958Flushing 1588230740/info: appending metadata at 1731436339980 (+22 ms)Flushing 1588230740/info: closing flushed file at 1731436339980Flushing 1588230740/ns: creating writer at 1731436339997 (+17 ms)Flushing 1588230740/ns: appending metadata at 1731436340011 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731436340011Flushing 1588230740/table: creating writer at 1731436340023 (+12 ms)Flushing 1588230740/table: appending metadata at 1731436340037 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731436340037Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@511ad1ad: reopening flushed file at 1731436340047 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7202b776: reopening flushed file at 1731436340053 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@403f7e16: reopening flushed file at 1731436340059 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 108ms, sequenceid=11, compaction requested=false at 1731436340066 (+7 ms)Writing region close event to WAL at 1731436340067 (+1 ms)Running coprocessor post-close hooks at 1731436340071 (+4 ms)Closed at 1731436340071 2024-11-12T18:32:20,071 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-12T18:32:20,157 INFO [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(976): stopping server 9911683f163c,37059,1731436288626; all regions closed. 
2024-11-12T18:32:20,157 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:20,157 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:20,157 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:20,158 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:20,158 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:20,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741834_1010 (size=3306) 2024-11-12T18:32:20,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741834_1010 (size=3306) 2024-11-12T18:32:20,162 DEBUG [RS:0;9911683f163c:37059 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/oldWALs 2024-11-12T18:32:20,162 INFO [RS:0;9911683f163c:37059 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C37059%2C1731436288626.meta:.meta(num 1731436289407) 2024-11-12T18:32:20,163 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:20,163 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:20,163 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:20,163 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:20,163 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:20,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741844_1020 (size=1252) 2024-11-12T18:32:20,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741844_1020 (size=1252) 2024-11-12T18:32:20,168 DEBUG [RS:0;9911683f163c:37059 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/oldWALs 2024-11-12T18:32:20,168 INFO [RS:0;9911683f163c:37059 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C37059%2C1731436288626:(num 1731436339940) 2024-11-12T18:32:20,168 DEBUG [RS:0;9911683f163c:37059 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:32:20,168 INFO [RS:0;9911683f163c:37059 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:32:20,168 INFO [RS:0;9911683f163c:37059 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:32:20,168 INFO [RS:0;9911683f163c:37059 {}] hbase.ChoreService(370): Chore service for: regionserver/9911683f163c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-12T18:32:20,168 INFO [RS:0;9911683f163c:37059 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:32:20,168 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T18:32:20,169 INFO [RS:0;9911683f163c:37059 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37059 2024-11-12T18:32:20,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:32:20,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9911683f163c,37059,1731436288626 2024-11-12T18:32:20,171 INFO [RS:0;9911683f163c:37059 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:32:20,171 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9911683f163c,37059,1731436288626] 2024-11-12T18:32:20,174 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9911683f163c,37059,1731436288626 already deleted, retry=false 2024-11-12T18:32:20,174 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9911683f163c,37059,1731436288626 expired; onlineServers=0 2024-11-12T18:32:20,174 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '9911683f163c,46127,1731436288561' ***** 2024-11-12T18:32:20,174 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T18:32:20,174 INFO [M:0;9911683f163c:46127 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:32:20,174 INFO [M:0;9911683f163c:46127 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:32:20,174 DEBUG [M:0;9911683f163c:46127 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T18:32:20,174 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-12T18:32:20,174 DEBUG [M:0;9911683f163c:46127 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T18:32:20,174 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436288803 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436288803,5,FailOnTimeoutGroup] 2024-11-12T18:32:20,174 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436288803 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436288803,5,FailOnTimeoutGroup] 2024-11-12T18:32:20,174 INFO [M:0;9911683f163c:46127 {}] hbase.ChoreService(370): Chore service for: master/9911683f163c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-12T18:32:20,174 INFO [M:0;9911683f163c:46127 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:32:20,174 DEBUG [M:0;9911683f163c:46127 {}] master.HMaster(1795): Stopping service threads 2024-11-12T18:32:20,174 INFO [M:0;9911683f163c:46127 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T18:32:20,174 INFO [M:0;9911683f163c:46127 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:32:20,175 INFO [M:0;9911683f163c:46127 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T18:32:20,175 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-12T18:32:20,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T18:32:20,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:32:20,176 DEBUG [M:0;9911683f163c:46127 {}] zookeeper.ZKUtil(347): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-12T18:32:20,176 WARN [M:0;9911683f163c:46127 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-12T18:32:20,176 INFO [M:0;9911683f163c:46127 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/.lastflushedseqids 2024-11-12T18:32:20,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741849_1025 (size=130) 2024-11-12T18:32:20,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741849_1025 (size=130) 2024-11-12T18:32:20,182 INFO [M:0;9911683f163c:46127 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-12T18:32:20,182 INFO [M:0;9911683f163c:46127 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-12T18:32:20,182 DEBUG [M:0;9911683f163c:46127 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-12T18:32:20,182 INFO [M:0;9911683f163c:46127 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-12T18:32:20,183 DEBUG [M:0;9911683f163c:46127 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-12T18:32:20,183 DEBUG [M:0;9911683f163c:46127 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-12T18:32:20,183 DEBUG [M:0;9911683f163c:46127 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-12T18:32:20,183 INFO [M:0;9911683f163c:46127 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.91 KB
2024-11-12T18:32:20,199 DEBUG [M:0;9911683f163c:46127 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b15d15dc69524144a8dbd6b9de1ab813 is 82, key is hbase:meta,,1/info:regioninfo/1731436289475/Put/seqid=0
2024-11-12T18:32:20,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741850_1026 (size=5672)
2024-11-12T18:32:20,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741850_1026 (size=5672)
2024-11-12T18:32:20,205 INFO [M:0;9911683f163c:46127 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b15d15dc69524144a8dbd6b9de1ab813
2024-11-12T18:32:20,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-12T18:32:20,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-12T18:32:20,225 DEBUG [M:0;9911683f163c:46127 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c32bc84ec8c54b86bd92adaa85fd77be is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731436289970/Put/seqid=0
2024-11-12T18:32:20,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741851_1027 (size=7818)
2024-11-12T18:32:20,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741851_1027 (size=7818)
2024-11-12T18:32:20,231 INFO [M:0;9911683f163c:46127 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.94 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c32bc84ec8c54b86bd92adaa85fd77be
2024-11-12T18:32:20,235 INFO [M:0;9911683f163c:46127 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c32bc84ec8c54b86bd92adaa85fd77be
2024-11-12T18:32:20,250 DEBUG [M:0;9911683f163c:46127 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1b10a804e39543199f96456465415345 is 69, key is 9911683f163c,37059,1731436288626/rs:state/1731436288878/Put/seqid=0
2024-11-12T18:32:20,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741852_1028 (size=5156)
2024-11-12T18:32:20,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741852_1028 (size=5156)
2024-11-12T18:32:20,255 INFO [M:0;9911683f163c:46127 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1b10a804e39543199f96456465415345
2024-11-12T18:32:20,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-12T18:32:20,272 INFO [RS:0;9911683f163c:37059 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-12T18:32:20,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37059-0x100354324b40001, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-12T18:32:20,273 INFO [RS:0;9911683f163c:37059 {}] regionserver.HRegionServer(1031): Exiting; stopping=9911683f163c,37059,1731436288626; zookeeper connection closed.
2024-11-12T18:32:20,273 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6844f6f5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6844f6f5 2024-11-12T18:32:20,273 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-12T18:32:20,274 DEBUG [M:0;9911683f163c:46127 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4d33c09d3d234a41862214a2462f5b22 is 52, key is load_balancer_on/state:d/1731436289566/Put/seqid=0 2024-11-12T18:32:20,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741853_1029 (size=5056) 2024-11-12T18:32:20,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741853_1029 (size=5056) 2024-11-12T18:32:20,280 INFO [M:0;9911683f163c:46127 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4d33c09d3d234a41862214a2462f5b22 2024-11-12T18:32:20,285 DEBUG [M:0;9911683f163c:46127 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b15d15dc69524144a8dbd6b9de1ab813 as hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b15d15dc69524144a8dbd6b9de1ab813 2024-11-12T18:32:20,290 INFO [M:0;9911683f163c:46127 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b15d15dc69524144a8dbd6b9de1ab813, entries=8, sequenceid=121, filesize=5.5 K 2024-11-12T18:32:20,291 DEBUG [M:0;9911683f163c:46127 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c32bc84ec8c54b86bd92adaa85fd77be as hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c32bc84ec8c54b86bd92adaa85fd77be 2024-11-12T18:32:20,296 INFO [M:0;9911683f163c:46127 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c32bc84ec8c54b86bd92adaa85fd77be 2024-11-12T18:32:20,296 INFO [M:0;9911683f163c:46127 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c32bc84ec8c54b86bd92adaa85fd77be, entries=14, sequenceid=121, filesize=7.6 K 2024-11-12T18:32:20,297 DEBUG [M:0;9911683f163c:46127 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1b10a804e39543199f96456465415345 as 
hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1b10a804e39543199f96456465415345 2024-11-12T18:32:20,301 INFO [M:0;9911683f163c:46127 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1b10a804e39543199f96456465415345, entries=1, sequenceid=121, filesize=5.0 K 2024-11-12T18:32:20,302 DEBUG [M:0;9911683f163c:46127 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4d33c09d3d234a41862214a2462f5b22 as hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4d33c09d3d234a41862214a2462f5b22 2024-11-12T18:32:20,306 INFO [M:0;9911683f163c:46127 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42641/user/jenkins/test-data/046d0e75-21a0-b29c-a6c8-b33856296fe1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4d33c09d3d234a41862214a2462f5b22, entries=1, sequenceid=121, filesize=4.9 K 2024-11-12T18:32:20,307 INFO [M:0;9911683f163c:46127 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=121, compaction requested=false 2024-11-12T18:32:20,309 INFO [M:0;9911683f163c:46127 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:32:20,309 DEBUG [M:0;9911683f163c:46127 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436340182Disabling compacts and flushes for region at 1731436340182Disabling writes for close at 1731436340183 (+1 ms)Obtaining lock to block concurrent updates at 1731436340183Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731436340183Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44590, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1731436340183Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731436340184 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731436340184Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731436340198 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731436340198Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731436340210 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731436340225 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731436340225Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731436340236 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731436340249 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731436340250 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731436340260 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731436340274 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731436340274Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c029f0: reopening flushed file at 1731436340285 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ce0a033: reopening flushed file at 1731436340290 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@746fefaa: reopening flushed file at 1731436340296 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23162590: reopening flushed file at 1731436340301 (+5 ms)Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=121, compaction requested=false at 1731436340307 (+6 ms)Writing region close event to WAL at 1731436340309 (+2 ms)Closed at 1731436340309 2024-11-12T18:32:20,309 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:20,309 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:20,309 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:20,309 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:20,310 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:32:20,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741830_1006 (size=52987) 2024-11-12T18:32:20,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741830_1006 (size=52987) 2024-11-12T18:32:20,312 INFO [M:0;9911683f163c:46127 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-12T18:32:20,312 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T18:32:20,312 INFO [M:0;9911683f163c:46127 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46127 2024-11-12T18:32:20,312 INFO [M:0;9911683f163c:46127 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:32:20,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:32:20,414 INFO [M:0;9911683f163c:46127 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:32:20,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46127-0x100354324b40000, quorum=127.0.0.1:55274, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:32:20,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7273ffd9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:32:20,417 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5501be24{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:32:20,417 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:32:20,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10bb53c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:32:20,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e776489{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/hadoop.log.dir/,STOPPED} 2024-11-12T18:32:20,419 WARN [BP-6681107-172.17.0.3-1731436287778 heartbeating to localhost/127.0.0.1:42641 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:32:20,419 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T18:32:20,419 WARN [BP-6681107-172.17.0.3-1731436287778 heartbeating to localhost/127.0.0.1:42641 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-6681107-172.17.0.3-1731436287778 (Datanode Uuid 19019242-c273-42e8-8959-7efd37ec4e04) service to localhost/127.0.0.1:42641 2024-11-12T18:32:20,419 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:32:20,420 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/cluster_abd5de15-dadd-f4e0-5d64-6879e7382ac2/data/data3/current/BP-6681107-172.17.0.3-1731436287778 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:32:20,420 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/cluster_abd5de15-dadd-f4e0-5d64-6879e7382ac2/data/data4/current/BP-6681107-172.17.0.3-1731436287778 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:32:20,420 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:32:20,424 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6aa18e30{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:32:20,424 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37be9df5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:32:20,424 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:32:20,424 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5218abd2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:32:20,424 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3edbaed2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/hadoop.log.dir/,STOPPED} 2024-11-12T18:32:20,426 WARN [BP-6681107-172.17.0.3-1731436287778 heartbeating to localhost/127.0.0.1:42641 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:32:20,426 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T18:32:20,426 WARN [BP-6681107-172.17.0.3-1731436287778 heartbeating to localhost/127.0.0.1:42641 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-6681107-172.17.0.3-1731436287778 (Datanode Uuid 451c84e1-fb9f-42b8-8992-ef6cc6267549) service to localhost/127.0.0.1:42641 2024-11-12T18:32:20,426 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:32:20,427 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/cluster_abd5de15-dadd-f4e0-5d64-6879e7382ac2/data/data1/current/BP-6681107-172.17.0.3-1731436287778 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:32:20,427 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/cluster_abd5de15-dadd-f4e0-5d64-6879e7382ac2/data/data2/current/BP-6681107-172.17.0.3-1731436287778 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:32:20,427 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:32:20,433 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@555de183{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:32:20,434 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@796376f7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:32:20,434 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:32:20,434 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b2ab1bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:32:20,434 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6db0a9fa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/hadoop.log.dir/,STOPPED} 2024-11-12T18:32:20,440 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-12T18:32:20,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-12T18:32:20,468 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 179) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:42641 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:42641 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42641 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42641 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:42641 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42641 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
regionserver/9911683f163c:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42641 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42641 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:42641 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=91 (was 172), ProcessCount=11 (was 11), AvailableMemoryMB=6351 (was 6220) - AvailableMemoryMB LEAK? - 2024-11-12T18:32:20,476 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=91, ProcessCount=11, AvailableMemoryMB=6351 2024-11-12T18:32:20,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-12T18:32:20,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/hadoop.log.dir so I do NOT create it in target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248 2024-11-12T18:32:20,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6763459c-5695-c9ce-fedc-77ecacebcc9b/hadoop.tmp.dir so I do NOT create it in target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248 2024-11-12T18:32:20,477 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/cluster_ac05d1ae-a92b-0c6a-4208-0cf67ca7ffce, deleteOnExit=true 2024-11-12T18:32:20,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-12T18:32:20,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/test.cache.data in system properties and HBase conf 2024-11-12T18:32:20,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/hadoop.tmp.dir in system properties and HBase conf 2024-11-12T18:32:20,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/hadoop.log.dir in system properties and HBase conf 2024-11-12T18:32:20,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-12T18:32:20,477 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-12T18:32:20,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-12T18:32:20,477 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-12T18:32:20,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:32:20,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:32:20,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-12T18:32:20,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:32:20,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-12T18:32:20,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-12T18:32:20,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:32:20,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:32:20,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/dfs.datanode.shared.file.descriptor.paths in system 
properties and HBase conf 2024-11-12T18:32:20,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/nfs.dump.dir in system properties and HBase conf 2024-11-12T18:32:20,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/java.io.tmpdir in system properties and HBase conf 2024-11-12T18:32:20,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:32:20,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-12T18:32:20,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-12T18:32:20,491 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-12T18:32:20,557 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:32:20,561 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:32:20,562 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:32:20,562 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:32:20,562 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T18:32:20,563 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:32:20,563 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@580f3be3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:32:20,564 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f4c5c16{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:32:20,677 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a5cc73{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/java.io.tmpdir/jetty-localhost-39879-hadoop-hdfs-3_4_1-tests_jar-_-any-7751237080668081921/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:32:20,677 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1d00cf8f{HTTP/1.1, (http/1.1)}{localhost:39879} 2024-11-12T18:32:20,677 INFO [Time-limited test {}] server.Server(415): Started @236439ms 2024-11-12T18:32:20,690 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-12T18:32:20,739 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:32:20,742 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:32:20,743 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:32:20,743 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:32:20,743 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:32:20,743 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e1bddd6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:32:20,744 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e62dbc1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:32:20,858 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@16c2050e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/java.io.tmpdir/jetty-localhost-43023-hadoop-hdfs-3_4_1-tests_jar-_-any-10047742091686405907/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:32:20,858 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@391d7ae4{HTTP/1.1, (http/1.1)}{localhost:43023} 2024-11-12T18:32:20,858 INFO [Time-limited test {}] server.Server(415): Started @236620ms 2024-11-12T18:32:20,859 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:32:20,894 INFO [regionserver/9911683f163c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:32:20,908 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:32:20,911 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:32:20,912 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:32:20,912 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:32:20,912 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T18:32:20,912 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@738488a9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:32:20,912 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1abf8fe3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:32:20,962 WARN [Thread-1948 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/cluster_ac05d1ae-a92b-0c6a-4208-0cf67ca7ffce/data/data1/current/BP-8344189-172.17.0.3-1731436340498/current, will proceed with Du for space computation calculation, 2024-11-12T18:32:20,962 WARN [Thread-1949 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/cluster_ac05d1ae-a92b-0c6a-4208-0cf67ca7ffce/data/data2/current/BP-8344189-172.17.0.3-1731436340498/current, will proceed with Du for space computation calculation, 2024-11-12T18:32:20,978 WARN [Thread-1927 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T18:32:20,981 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5eb2dd4eb38ca663 with lease ID 0xd00e5891a879dc37: Processing first storage report for DS-ef4a882e-295b-4cdf-96b2-ba4851d1c4b1 from datanode DatanodeRegistration(127.0.0.1:36307, datanodeUuid=53fb17a5-9ac2-430a-aa14-7f7814cd5b61, infoPort=42091, infoSecurePort=0, ipcPort=42289, storageInfo=lv=-57;cid=testClusterID;nsid=629665981;c=1731436340498) 2024-11-12T18:32:20,981 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5eb2dd4eb38ca663 with lease ID 0xd00e5891a879dc37: from storage DS-ef4a882e-295b-4cdf-96b2-ba4851d1c4b1 node DatanodeRegistration(127.0.0.1:36307, datanodeUuid=53fb17a5-9ac2-430a-aa14-7f7814cd5b61, infoPort=42091, infoSecurePort=0, ipcPort=42289, storageInfo=lv=-57;cid=testClusterID;nsid=629665981;c=1731436340498), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-12T18:32:20,981 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5eb2dd4eb38ca663 with lease ID 0xd00e5891a879dc37: Processing first storage report for DS-7ea15003-ae1a-4d51-9ead-d46d48a2b788 from datanode DatanodeRegistration(127.0.0.1:36307, datanodeUuid=53fb17a5-9ac2-430a-aa14-7f7814cd5b61, infoPort=42091, infoSecurePort=0, ipcPort=42289, storageInfo=lv=-57;cid=testClusterID;nsid=629665981;c=1731436340498) 2024-11-12T18:32:20,981 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5eb2dd4eb38ca663 with lease ID 0xd00e5891a879dc37: from storage DS-7ea15003-ae1a-4d51-9ead-d46d48a2b788 node DatanodeRegistration(127.0.0.1:36307, datanodeUuid=53fb17a5-9ac2-430a-aa14-7f7814cd5b61, infoPort=42091, infoSecurePort=0, ipcPort=42289, storageInfo=lv=-57;cid=testClusterID;nsid=629665981;c=1731436340498), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:32:21,026 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9c38101{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/java.io.tmpdir/jetty-localhost-37971-hadoop-hdfs-3_4_1-tests_jar-_-any-11180134350307313680/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:32:21,026 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3b1137c4{HTTP/1.1, (http/1.1)}{localhost:37971} 2024-11-12T18:32:21,026 INFO [Time-limited test {}] server.Server(415): Started @236788ms 2024-11-12T18:32:21,028 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
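The entries above come from HBaseTestingUtil redirecting the Hadoop/YARN/DFS directories into the test-data folder and then bringing up an embedded HDFS (NameNode plus two DataNodes behind the Jetty endpoints). As a rough illustration only, and not code taken from this test run, the public HBaseTestingUtil API that typically drives this sequence looks like:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // HBaseTestingUtil rewrites the Hadoop/YARN/DFS directories into its
        // test-data folder (as logged above) before starting anything.
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Starts an in-process HDFS (NameNode + DataNodes), a MiniZooKeeperCluster
        // and a single master/region-server HBase cluster.
        util.startMiniCluster();
        try {
          // a test body would use util.getConnection() / util.getAdmin() here
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }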
2024-11-12T18:32:21,108 WARN [Thread-1974 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/cluster_ac05d1ae-a92b-0c6a-4208-0cf67ca7ffce/data/data3/current/BP-8344189-172.17.0.3-1731436340498/current, will proceed with Du for space computation calculation, 2024-11-12T18:32:21,108 WARN [Thread-1975 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/cluster_ac05d1ae-a92b-0c6a-4208-0cf67ca7ffce/data/data4/current/BP-8344189-172.17.0.3-1731436340498/current, will proceed with Du for space computation calculation, 2024-11-12T18:32:21,124 WARN [Thread-1963 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T18:32:21,127 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf4fb35b43c96e1c6 with lease ID 0xd00e5891a879dc38: Processing first storage report for DS-38ef233d-2008-4946-9388-400f1838e93b from datanode DatanodeRegistration(127.0.0.1:38811, datanodeUuid=38bc28a8-4c38-4a4a-bca6-91cb8af14694, infoPort=41125, infoSecurePort=0, ipcPort=41595, storageInfo=lv=-57;cid=testClusterID;nsid=629665981;c=1731436340498) 2024-11-12T18:32:21,127 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf4fb35b43c96e1c6 with lease ID 0xd00e5891a879dc38: from storage DS-38ef233d-2008-4946-9388-400f1838e93b node DatanodeRegistration(127.0.0.1:38811, datanodeUuid=38bc28a8-4c38-4a4a-bca6-91cb8af14694, infoPort=41125, infoSecurePort=0, ipcPort=41595, storageInfo=lv=-57;cid=testClusterID;nsid=629665981;c=1731436340498), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:32:21,127 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf4fb35b43c96e1c6 with lease ID 0xd00e5891a879dc38: Processing first storage report for DS-9e8645cf-d9b1-40f3-b31f-a7806b8f07cd from datanode DatanodeRegistration(127.0.0.1:38811, datanodeUuid=38bc28a8-4c38-4a4a-bca6-91cb8af14694, infoPort=41125, infoSecurePort=0, ipcPort=41595, storageInfo=lv=-57;cid=testClusterID;nsid=629665981;c=1731436340498) 2024-11-12T18:32:21,127 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf4fb35b43c96e1c6 with lease ID 0xd00e5891a879dc38: from storage DS-9e8645cf-d9b1-40f3-b31f-a7806b8f07cd node DatanodeRegistration(127.0.0.1:38811, datanodeUuid=38bc28a8-4c38-4a4a-bca6-91cb8af14694, infoPort=41125, infoSecurePort=0, ipcPort=41595, storageInfo=lv=-57;cid=testClusterID;nsid=629665981;c=1731436340498), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:32:21,150 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248 2024-11-12T18:32:21,153 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/cluster_ac05d1ae-a92b-0c6a-4208-0cf67ca7ffce/zookeeper_0, clientPort=55721, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/cluster_ac05d1ae-a92b-0c6a-4208-0cf67ca7ffce/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/cluster_ac05d1ae-a92b-0c6a-4208-0cf67ca7ffce/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-12T18:32:21,153 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55721 2024-11-12T18:32:21,154 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:32:21,155 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:32:21,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:32:21,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:32:21,164 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110 with version=8 2024-11-12T18:32:21,164 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/hbase-staging 2024-11-12T18:32:21,166 INFO [Time-limited test {}] client.ConnectionUtils(128): master/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:32:21,166 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:32:21,166 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:32:21,166 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:32:21,166 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:32:21,167 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:32:21,167 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-12T18:32:21,167 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:32:21,167 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38027 2024-11-12T18:32:21,168 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38027 connecting to ZooKeeper ensemble=127.0.0.1:55721 2024-11-12T18:32:21,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:380270x0, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:32:21,175 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38027-0x1003543f23e0000 connected 2024-11-12T18:32:21,188 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:32:21,190 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:32:21,193 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:32:21,193 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110, hbase.cluster.distributed=false 2024-11-12T18:32:21,195 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:32:21,195 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38027 2024-11-12T18:32:21,195 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38027 2024-11-12T18:32:21,196 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38027 2024-11-12T18:32:21,196 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38027 2024-11-12T18:32:21,196 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38027 2024-11-12T18:32:21,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:21,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:21,218 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:32:21,219 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:32:21,219 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:32:21,219 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:32:21,219 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:32:21,219 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:32:21,219 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T18:32:21,219 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:32:21,220 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37187 2024-11-12T18:32:21,221 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37187 connecting to ZooKeeper ensemble=127.0.0.1:55721 2024-11-12T18:32:21,221 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:32:21,223 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:32:21,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:371870x0, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:32:21,227 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:371870x0, quorum=127.0.0.1:55721, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:32:21,227 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37187-0x1003543f23e0001 connected 2024-11-12T18:32:21,227 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 
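The region-server bring-up above instantiates its RPC call queues ("Instantiated default.FPBQ.Fifo ... handlerCount=3") and an on-heap BlockCache ("Allocating BlockCache size=880 MB"). Purely as an illustration of the standard configuration keys behind those lines (the values here are not read from this log), the relevant knobs are:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ServerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Size of the RPC handler pool reflected in the handlerCount=3 queues above.
        conf.setInt("hbase.regionserver.handler.count", 3);
        // The on-heap block cache is sized as a fraction of the JVM heap;
        // 0.4 is the stock default fraction.
        conf.setFloat("hfile.block.cache.size", 0.4f);
        System.out.println("handlers=" + conf.getInt("hbase.regionserver.handler.count", -1));
      }
    }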
2024-11-12T18:32:21,228 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T18:32:21,228 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T18:32:21,229 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:32:21,229 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37187 2024-11-12T18:32:21,230 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37187 2024-11-12T18:32:21,230 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37187 2024-11-12T18:32:21,230 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37187 2024-11-12T18:32:21,230 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37187 2024-11-12T18:32:21,242 DEBUG [M:0;9911683f163c:38027 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9911683f163c:38027 2024-11-12T18:32:21,242 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/9911683f163c,38027,1731436341166 2024-11-12T18:32:21,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:32:21,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:32:21,245 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9911683f163c,38027,1731436341166 2024-11-12T18:32:21,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T18:32:21,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:32:21,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:32:21,246 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T18:32:21,247 INFO 
[master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9911683f163c,38027,1731436341166 from backup master directory 2024-11-12T18:32:21,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9911683f163c,38027,1731436341166 2024-11-12T18:32:21,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:32:21,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:32:21,249 WARN [master/9911683f163c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T18:32:21,249 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9911683f163c,38027,1731436341166 2024-11-12T18:32:21,253 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/hbase.id] with ID: 5650b3e7-c2e8-4074-aafd-a9a3f6e135d9 2024-11-12T18:32:21,253 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/.tmp/hbase.id 2024-11-12T18:32:21,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:32:21,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:32:21,260 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/.tmp/hbase.id]:[hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/hbase.id] 2024-11-12T18:32:21,270 INFO [master/9911683f163c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:32:21,270 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-12T18:32:21,271 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
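At this point the master has registered itself as active in ZooKeeper (ensemble 127.0.0.1:55721) and written the cluster ID file (hbase.id). For orientation, a client outside the test would reach this cluster roughly as follows; the snippet is illustrative, and only the quorum address and client port are taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnectSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ZooKeeper ensemble used by the mini cluster in this run.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 55721);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Should report the cluster ID that was written to hbase.id above.
          System.out.println(admin.getClusterMetrics().getClusterId());
        }
      }
    }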
2024-11-12T18:32:21,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:32:21,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:32:21,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:32:21,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:32:21,280 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:32:21,280 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T18:32:21,281 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:32:21,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:32:21,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:32:21,288 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store 2024-11-12T18:32:21,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:32:21,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:32:21,294 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:32:21,295 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:32:21,295 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:32:21,295 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:32:21,295 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:32:21,295 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:32:21,295 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
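The 'master:store' descriptor dumped above is the master's local bookkeeping region, with the four families info/proc/rs/state. As an illustration of the same structure expressed through the public client builder API (this code is not part of the test; only the family names and the 'info' attributes mirror the log), it corresponds to:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            // 'info' family: VERSIONS=3, IN_MEMORY=true, 8 KB blocks,
            // ROW_INDEX_V1 encoding, ROWCOL bloom filter (as printed above).
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            // remaining families use the defaults shown in the log dump
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
        System.out.println(td);
      }
    }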
2024-11-12T18:32:21,295 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436341295Disabling compacts and flushes for region at 1731436341295Disabling writes for close at 1731436341295Writing region close event to WAL at 1731436341295Closed at 1731436341295 2024-11-12T18:32:21,296 WARN [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/.initializing 2024-11-12T18:32:21,296 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/WALs/9911683f163c,38027,1731436341166 2024-11-12T18:32:21,298 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C38027%2C1731436341166, suffix=, logDir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/WALs/9911683f163c,38027,1731436341166, archiveDir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/oldWALs, maxLogs=10 2024-11-12T18:32:21,298 INFO [master/9911683f163c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C38027%2C1731436341166.1731436341298 2024-11-12T18:32:21,303 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/WALs/9911683f163c,38027,1731436341166/9911683f163c%2C38027%2C1731436341166.1731436341298 2024-11-12T18:32:21,304 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42091:42091),(127.0.0.1/127.0.0.1:41125:41125)] 2024-11-12T18:32:21,307 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:32:21,307 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:32:21,307 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:32:21,307 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:32:21,308 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:32:21,309 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T18:32:21,310 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:21,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:32:21,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:32:21,311 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T18:32:21,311 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:21,312 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:32:21,312 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:32:21,313 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T18:32:21,313 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:21,313 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:32:21,313 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:32:21,314 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T18:32:21,314 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:21,315 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:32:21,315 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:32:21,316 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:32:21,316 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:32:21,317 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:32:21,317 DEBUG [master/9911683f163c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:32:21,318 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T18:32:21,319 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:32:21,321 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:32:21,321 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856519, jitterRate=0.08912086486816406}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T18:32:21,322 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731436341307Initializing all the Stores at 1731436341308 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436341308Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436341308Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436341308Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436341308Cleaning up temporary data from old regions at 1731436341317 (+9 ms)Region opened successfully at 1731436341322 (+5 ms) 2024-11-12T18:32:21,322 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T18:32:21,325 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24712e94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:32:21,326 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-12T18:32:21,326 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T18:32:21,326 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T18:32:21,326 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T18:32:21,327 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-12T18:32:21,327 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-12T18:32:21,327 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T18:32:21,329 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-12T18:32:21,330 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T18:32:21,331 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-12T18:32:21,331 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T18:32:21,332 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T18:32:21,333 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-12T18:32:21,333 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T18:32:21,334 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T18:32:21,335 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-12T18:32:21,336 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T18:32:21,337 DEBUG 
[master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T18:32:21,339 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T18:32:21,341 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T18:32:21,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:32:21,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:32:21,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:32:21,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:32:21,343 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=9911683f163c,38027,1731436341166, sessionid=0x1003543f23e0000, setting cluster-up flag (Was=false) 2024-11-12T18:32:21,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:32:21,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:32:21,351 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T18:32:21,352 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,38027,1731436341166 2024-11-12T18:32:21,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:32:21,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:32:21,360 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T18:32:21,361 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,38027,1731436341166 2024-11-12T18:32:21,362 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-12T18:32:21,363 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-12T18:32:21,364 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-12T18:32:21,364 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-12T18:32:21,364 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9911683f163c,38027,1731436341166 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T18:32:21,365 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:32:21,365 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:32:21,365 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:32:21,365 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:32:21,365 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9911683f163c:0, corePoolSize=10, maxPoolSize=10 2024-11-12T18:32:21,365 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:32:21,365 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:32:21,365 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9911683f163c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-12T18:32:21,368 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731436371368 2024-11-12T18:32:21,368 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T18:32:21,368 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T18:32:21,368 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T18:32:21,368 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T18:32:21,368 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T18:32:21,369 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T18:32:21,369 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:32:21,369 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-12T18:32:21,370 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:21,370 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T18:32:21,372 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:21,374 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T18:32:21,374 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T18:32:21,374 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T18:32:21,375 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T18:32:21,375 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T18:32:21,375 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436341375,5,FailOnTimeoutGroup] 2024-11-12T18:32:21,376 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436341375,5,FailOnTimeoutGroup] 2024-11-12T18:32:21,376 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:21,376 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-12T18:32:21,376 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:21,377 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
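The HMaster(1741) entry above notes that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value > 0. A minimal sketch of supplying that threshold programmatically is shown below; the threshold value 3 and the standalone main() wrapper are illustrative assumptions, not part of this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableRefCountRecovery {
  public static void main(String[] args) {
    // Start from the stock HBase configuration.
    Configuration conf = HBaseConfiguration.create();
    // Any value > 0 enables the master check mentioned in the log; 3 is an arbitrary example.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
  }
}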
2024-11-12T18:32:21,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741831_1007 (size=1321) 2024-11-12T18:32:21,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741831_1007 (size=1321) 2024-11-12T18:32:21,380 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-12T18:32:21,380 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110 2024-11-12T18:32:21,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:32:21,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:32:21,387 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:32:21,388 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:32:21,389 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:32:21,389 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:21,389 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:32:21,390 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:32:21,390 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:32:21,391 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:21,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:32:21,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:32:21,392 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:32:21,392 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:21,392 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:32:21,392 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:32:21,393 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:32:21,393 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:21,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:32:21,394 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:32:21,394 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740 2024-11-12T18:32:21,395 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740 2024-11-12T18:32:21,396 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:32:21,396 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:32:21,396 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
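The FlushLargeStoresPolicy(65) entry above falls back to memStoreFlushSize divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. A minimal sketch of setting that bound on a descriptor follows; the table name example_table and the 16 MB value are assumptions for illustration.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // Put the per-column-family flush lower bound directly into the table descriptor,
    // which is where FlushLargeStoresPolicy looks for it according to the log message.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024)) // 16 MB, illustrative
        .build();
    System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
  }
}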
2024-11-12T18:32:21,397 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:32:21,399 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:32:21,399 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=719528, jitterRate=-0.08507344126701355}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T18:32:21,400 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731436341387Initializing all the Stores at 1731436341388 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436341388Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436341388Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436341388Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436341388Cleaning up temporary data from old regions at 1731436341396 (+8 ms)Region opened successfully at 1731436341400 (+4 ms) 2024-11-12T18:32:21,400 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:32:21,400 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:32:21,400 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:32:21,400 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:32:21,400 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:32:21,400 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:32:21,400 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436341400Disabling compacts and flushes for region at 1731436341400Disabling writes for close at 1731436341400Writing region close 
event to WAL at 1731436341400Closed at 1731436341400 2024-11-12T18:32:21,402 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:32:21,402 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-12T18:32:21,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-12T18:32:21,403 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:32:21,404 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-12T18:32:21,432 INFO [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(746): ClusterId : 5650b3e7-c2e8-4074-aafd-a9a3f6e135d9 2024-11-12T18:32:21,432 DEBUG [RS:0;9911683f163c:37187 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T18:32:21,434 DEBUG [RS:0;9911683f163c:37187 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T18:32:21,434 DEBUG [RS:0;9911683f163c:37187 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T18:32:21,436 DEBUG [RS:0;9911683f163c:37187 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T18:32:21,436 DEBUG [RS:0;9911683f163c:37187 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@691718f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:32:21,449 DEBUG [RS:0;9911683f163c:37187 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9911683f163c:37187 2024-11-12T18:32:21,449 INFO [RS:0;9911683f163c:37187 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T18:32:21,449 INFO [RS:0;9911683f163c:37187 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T18:32:21,449 DEBUG [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-12T18:32:21,449 INFO [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,38027,1731436341166 with port=37187, startcode=1731436341218 2024-11-12T18:32:21,450 DEBUG [RS:0;9911683f163c:37187 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T18:32:21,452 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36951, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T18:32:21,452 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38027 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9911683f163c,37187,1731436341218 2024-11-12T18:32:21,452 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38027 {}] master.ServerManager(517): Registering regionserver=9911683f163c,37187,1731436341218 2024-11-12T18:32:21,454 DEBUG [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110 2024-11-12T18:32:21,454 DEBUG [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38837 2024-11-12T18:32:21,454 DEBUG [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T18:32:21,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:32:21,457 DEBUG [RS:0;9911683f163c:37187 {}] zookeeper.ZKUtil(111): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9911683f163c,37187,1731436341218 2024-11-12T18:32:21,457 WARN [RS:0;9911683f163c:37187 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T18:32:21,457 INFO [RS:0;9911683f163c:37187 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:32:21,457 DEBUG [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/WALs/9911683f163c,37187,1731436341218 2024-11-12T18:32:21,457 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9911683f163c,37187,1731436341218] 2024-11-12T18:32:21,460 INFO [RS:0;9911683f163c:37187 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T18:32:21,462 INFO [RS:0;9911683f163c:37187 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T18:32:21,463 INFO [RS:0;9911683f163c:37187 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T18:32:21,463 INFO [RS:0;9911683f163c:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-12T18:32:21,465 INFO [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T18:32:21,465 INFO [RS:0;9911683f163c:37187 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T18:32:21,466 INFO [RS:0;9911683f163c:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:21,466 DEBUG [RS:0;9911683f163c:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:32:21,466 DEBUG [RS:0;9911683f163c:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:32:21,466 DEBUG [RS:0;9911683f163c:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:32:21,466 DEBUG [RS:0;9911683f163c:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:32:21,466 DEBUG [RS:0;9911683f163c:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:32:21,466 DEBUG [RS:0;9911683f163c:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:32:21,466 DEBUG [RS:0;9911683f163c:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:32:21,466 DEBUG [RS:0;9911683f163c:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:32:21,466 DEBUG [RS:0;9911683f163c:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:32:21,466 DEBUG [RS:0;9911683f163c:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:32:21,466 DEBUG [RS:0;9911683f163c:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:32:21,466 DEBUG [RS:0;9911683f163c:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:32:21,466 DEBUG [RS:0;9911683f163c:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:32:21,466 DEBUG [RS:0;9911683f163c:37187 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:32:21,466 INFO [RS:0;9911683f163c:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-12T18:32:21,466 INFO [RS:0;9911683f163c:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:21,466 INFO [RS:0;9911683f163c:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:21,466 INFO [RS:0;9911683f163c:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:21,466 INFO [RS:0;9911683f163c:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:21,466 INFO [RS:0;9911683f163c:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,37187,1731436341218-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:32:21,482 INFO [RS:0;9911683f163c:37187 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T18:32:21,482 INFO [RS:0;9911683f163c:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,37187,1731436341218-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:21,482 INFO [RS:0;9911683f163c:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:21,482 INFO [RS:0;9911683f163c:37187 {}] regionserver.Replication(171): 9911683f163c,37187,1731436341218 started 2024-11-12T18:32:21,497 INFO [RS:0;9911683f163c:37187 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:21,497 INFO [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(1482): Serving as 9911683f163c,37187,1731436341218, RpcServer on 9911683f163c/172.17.0.3:37187, sessionid=0x1003543f23e0001 2024-11-12T18:32:21,498 DEBUG [RS:0;9911683f163c:37187 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T18:32:21,498 DEBUG [RS:0;9911683f163c:37187 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9911683f163c,37187,1731436341218 2024-11-12T18:32:21,498 DEBUG [RS:0;9911683f163c:37187 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,37187,1731436341218' 2024-11-12T18:32:21,498 DEBUG [RS:0;9911683f163c:37187 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T18:32:21,498 DEBUG [RS:0;9911683f163c:37187 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T18:32:21,498 DEBUG [RS:0;9911683f163c:37187 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T18:32:21,499 DEBUG [RS:0;9911683f163c:37187 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T18:32:21,499 DEBUG [RS:0;9911683f163c:37187 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9911683f163c,37187,1731436341218 2024-11-12T18:32:21,499 DEBUG [RS:0;9911683f163c:37187 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,37187,1731436341218' 2024-11-12T18:32:21,499 DEBUG [RS:0;9911683f163c:37187 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T18:32:21,499 DEBUG 
[RS:0;9911683f163c:37187 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T18:32:21,499 DEBUG [RS:0;9911683f163c:37187 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T18:32:21,499 INFO [RS:0;9911683f163c:37187 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T18:32:21,499 INFO [RS:0;9911683f163c:37187 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T18:32:21,555 WARN [9911683f163c:38027 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-12T18:32:21,601 INFO [RS:0;9911683f163c:37187 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C37187%2C1731436341218, suffix=, logDir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/WALs/9911683f163c,37187,1731436341218, archiveDir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/oldWALs, maxLogs=32 2024-11-12T18:32:21,602 INFO [RS:0;9911683f163c:37187 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C37187%2C1731436341218.1731436341602 2024-11-12T18:32:21,608 INFO [RS:0;9911683f163c:37187 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/WALs/9911683f163c,37187,1731436341218/9911683f163c%2C37187%2C1731436341218.1731436341602 2024-11-12T18:32:21,609 DEBUG [RS:0;9911683f163c:37187 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42091:42091),(127.0.0.1/127.0.0.1:41125:41125)] 2024-11-12T18:32:21,805 DEBUG [9911683f163c:38027 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-12T18:32:21,805 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9911683f163c,37187,1731436341218 2024-11-12T18:32:21,807 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,37187,1731436341218, state=OPENING 2024-11-12T18:32:21,809 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-12T18:32:21,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:32:21,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:32:21,811 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:32:21,811 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:32:21,811 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:32:21,811 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,37187,1731436341218}] 2024-11-12T18:32:21,964 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T18:32:21,966 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43967, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T18:32:21,969 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-12T18:32:21,969 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:32:21,971 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C37187%2C1731436341218.meta, suffix=.meta, logDir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/WALs/9911683f163c,37187,1731436341218, archiveDir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/oldWALs, maxLogs=32 2024-11-12T18:32:21,971 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C37187%2C1731436341218.meta.1731436341971.meta 2024-11-12T18:32:21,976 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/WALs/9911683f163c,37187,1731436341218/9911683f163c%2C37187%2C1731436341218.meta.1731436341971.meta 2024-11-12T18:32:21,977 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42091:42091),(127.0.0.1/127.0.0.1:41125:41125)] 2024-11-12T18:32:21,978 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:32:21,978 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-12T18:32:21,978 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-12T18:32:21,978 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-12T18:32:21,978 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-12T18:32:21,978 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:32:21,978 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-12T18:32:21,978 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-12T18:32:21,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:32:21,980 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:32:21,980 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:21,981 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:32:21,981 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:32:21,982 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:32:21,982 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:21,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:32:21,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:32:21,983 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:32:21,983 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:21,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:32:21,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:32:21,984 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:32:21,984 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:21,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
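The CompactionConfiguration(183) entries above print the effective compaction tuning for each column family of 1588230740 (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2). A sketch of the configuration keys that usually drive those values is below; the key names are assumed from stock HBase documentation rather than taken from this log, so verify them against the 3.0.0-beta-2-SNAPSHOT build in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // selection ratio
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
    System.out.println(conf.get("hbase.hstore.compaction.ratio"));
  }
}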
2024-11-12T18:32:21,984 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:32:21,985 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740 2024-11-12T18:32:21,986 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740 2024-11-12T18:32:21,987 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:32:21,987 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:32:21,987 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T18:32:21,989 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:32:21,989 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=752503, jitterRate=-0.043143197894096375}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T18:32:21,989 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-12T18:32:21,990 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731436341979Writing region info on filesystem at 1731436341979Initializing all the Stores at 1731436341979Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436341979Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436341979Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436341980 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436341980Cleaning up temporary data from old regions at 1731436341987 (+7 ms)Running coprocessor post-open hooks at 1731436341989 (+2 ms)Region opened successfully at 1731436341990 (+1 ms) 2024-11-12T18:32:21,991 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731436341964 2024-11-12T18:32:21,993 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T18:32:21,993 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-12T18:32:21,994 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=9911683f163c,37187,1731436341218 2024-11-12T18:32:21,995 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,37187,1731436341218, state=OPEN 2024-11-12T18:32:22,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:32:22,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:32:22,000 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=9911683f163c,37187,1731436341218 2024-11-12T18:32:22,000 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:32:22,000 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:32:22,002 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T18:32:22,003 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,37187,1731436341218 in 189 msec 2024-11-12T18:32:22,005 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T18:32:22,005 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 601 msec 2024-11-12T18:32:22,006 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:32:22,006 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-12T18:32:22,007 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:32:22,007 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,37187,1731436341218, seqNum=-1] 2024-11-12T18:32:22,008 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:32:22,009 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56037, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:32:22,014 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 651 msec 2024-11-12T18:32:22,014 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731436342014, completionTime=-1 2024-11-12T18:32:22,014 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-12T18:32:22,014 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-12T18:32:22,016 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-12T18:32:22,016 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731436402016 2024-11-12T18:32:22,016 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731436462016 2024-11-12T18:32:22,016 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-12T18:32:22,016 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,38027,1731436341166-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:22,016 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,38027,1731436341166-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:22,017 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,38027,1731436341166-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:22,017 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9911683f163c:38027, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T18:32:22,017 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:22,017 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-12T18:32:22,018 DEBUG [master/9911683f163c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-12T18:32:22,020 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.771sec 2024-11-12T18:32:22,021 INFO [master/9911683f163c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T18:32:22,021 INFO [master/9911683f163c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T18:32:22,021 INFO [master/9911683f163c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T18:32:22,021 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-12T18:32:22,021 INFO [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T18:32:22,021 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,38027,1731436341166-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:32:22,021 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,38027,1731436341166-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T18:32:22,023 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-12T18:32:22,023 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T18:32:22,023 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,38027,1731436341166-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
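
[Editor's note, not part of the log] The "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" entries above are emitted by ChoreService(168) when the master registers its periodic background tasks. A minimal sketch of that registration pattern (hypothetical chore name and period; assumes the ScheduledChore/ChoreService/Stoppable classes from hbase-common):

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // Runs every 60 s until the stopper is stopped; ChoreService logs the
    // "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." line when scheduled.
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60_000) {
      @Override protected void chore() {
        // periodic work goes here
      }
    };
    ChoreService service = new ChoreService("example");
    service.scheduleChore(chore);
  }
}
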
2024-11-12T18:32:22,032 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f200af9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:32:22,032 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 9911683f163c,38027,-1 for getting cluster id 2024-11-12T18:32:22,032 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-12T18:32:22,034 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5650b3e7-c2e8-4074-aafd-a9a3f6e135d9' 2024-11-12T18:32:22,034 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-12T18:32:22,034 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5650b3e7-c2e8-4074-aafd-a9a3f6e135d9" 2024-11-12T18:32:22,035 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75642934, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:32:22,035 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9911683f163c,38027,-1] 2024-11-12T18:32:22,035 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-12T18:32:22,035 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:32:22,036 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53232, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-12T18:32:22,037 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19b65b50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:32:22,037 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:32:22,038 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,37187,1731436341218, seqNum=-1] 2024-11-12T18:32:22,038 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:32:22,039 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41368, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:32:22,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=9911683f163c,38027,1731436341166 2024-11-12T18:32:22,041 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:32:22,043 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-12T18:32:22,044 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-12T18:32:22,045 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 9911683f163c,38027,1731436341166 2024-11-12T18:32:22,045 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3b5791e3 2024-11-12T18:32:22,045 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T18:32:22,046 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53240, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T18:32:22,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38027 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-12T18:32:22,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38027 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-12T18:32:22,047 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38027 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:32:22,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38027 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-12T18:32:22,049 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T18:32:22,049 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:22,049 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38027 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-12T18:32:22,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38027 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:32:22,050 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T18:32:22,056 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741835_1011 (size=381) 2024-11-12T18:32:22,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741835_1011 (size=381) 2024-11-12T18:32:22,059 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1b3e80ef23618aa6d107df2581f000fc, NAME => 'TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110 2024-11-12T18:32:22,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741836_1012 (size=64) 2024-11-12T18:32:22,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741836_1012 (size=64) 2024-11-12T18:32:22,065 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:32:22,065 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 1b3e80ef23618aa6d107df2581f000fc, disabling compactions & flushes 2024-11-12T18:32:22,066 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 2024-11-12T18:32:22,066 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 2024-11-12T18:32:22,066 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. after waiting 0 ms 2024-11-12T18:32:22,066 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 2024-11-12T18:32:22,066 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 
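
[Editor's note, not part of the log] The create-table request logged above (table 'TestLogRolling-testLogRolling' with a single 'info' family, and the deliberately tiny MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192 that trigger the TableDescriptorChecker warnings) corresponds roughly to the client-side call sketched below. This is a hedged reconstruction using the HBase client API, not the test's actual code:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .setBlocksize(65536)               // BLOCKSIZE => 64 KB
              .build())
          // Tiny limits, as in the test; these provoke the MAX_FILESIZE and
          // MEMSTORE_FLUSHSIZE warnings seen in the log.
          .setMaxFileSize(786432)
          .setMemStoreFlushSize(8192)
          .build());
    }
  }
}
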
2024-11-12T18:32:22,066 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1b3e80ef23618aa6d107df2581f000fc: Waiting for close lock at 1731436342065Disabling compacts and flushes for region at 1731436342065Disabling writes for close at 1731436342066 (+1 ms)Writing region close event to WAL at 1731436342066Closed at 1731436342066 2024-11-12T18:32:22,067 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T18:32:22,067 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731436342067"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731436342067"}]},"ts":"1731436342067"} 2024-11-12T18:32:22,070 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-12T18:32:22,071 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T18:32:22,071 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731436342071"}]},"ts":"1731436342071"} 2024-11-12T18:32:22,073 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-12T18:32:22,074 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1b3e80ef23618aa6d107df2581f000fc, ASSIGN}] 2024-11-12T18:32:22,075 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1b3e80ef23618aa6d107df2581f000fc, ASSIGN 2024-11-12T18:32:22,076 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1b3e80ef23618aa6d107df2581f000fc, ASSIGN; state=OFFLINE, location=9911683f163c,37187,1731436341218; forceNewPlan=false, retain=false 2024-11-12T18:32:22,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:22,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:22,226 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1b3e80ef23618aa6d107df2581f000fc, regionState=OPENING, regionLocation=9911683f163c,37187,1731436341218 2024-11-12T18:32:22,229 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1b3e80ef23618aa6d107df2581f000fc, ASSIGN because future has completed 2024-11-12T18:32:22,230 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1b3e80ef23618aa6d107df2581f000fc, server=9911683f163c,37187,1731436341218}] 2024-11-12T18:32:22,386 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 2024-11-12T18:32:22,386 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1b3e80ef23618aa6d107df2581f000fc, NAME => 'TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:32:22,387 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:22,387 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:32:22,387 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:22,387 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:22,388 INFO [StoreOpener-1b3e80ef23618aa6d107df2581f000fc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:22,390 INFO [StoreOpener-1b3e80ef23618aa6d107df2581f000fc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1b3e80ef23618aa6d107df2581f000fc columnFamilyName info 2024-11-12T18:32:22,390 DEBUG [StoreOpener-1b3e80ef23618aa6d107df2581f000fc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:22,390 INFO [StoreOpener-1b3e80ef23618aa6d107df2581f000fc-1 {}] regionserver.HStore(327): Store=1b3e80ef23618aa6d107df2581f000fc/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:32:22,390 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:22,391 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:22,391 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:22,391 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:22,391 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:22,393 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:22,395 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:32:22,395 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1b3e80ef23618aa6d107df2581f000fc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734080, jitterRate=-0.06657017767429352}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-12T18:32:22,395 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:22,396 DEBUG 
[RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1b3e80ef23618aa6d107df2581f000fc: Running coprocessor pre-open hook at 1731436342387Writing region info on filesystem at 1731436342387Initializing all the Stores at 1731436342388 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436342388Cleaning up temporary data from old regions at 1731436342391 (+3 ms)Running coprocessor post-open hooks at 1731436342395 (+4 ms)Region opened successfully at 1731436342396 (+1 ms) 2024-11-12T18:32:22,397 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc., pid=6, masterSystemTime=1731436342383 2024-11-12T18:32:22,399 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 2024-11-12T18:32:22,399 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 2024-11-12T18:32:22,400 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1b3e80ef23618aa6d107df2581f000fc, regionState=OPEN, openSeqNum=2, regionLocation=9911683f163c,37187,1731436341218 2024-11-12T18:32:22,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1b3e80ef23618aa6d107df2581f000fc, server=9911683f163c,37187,1731436341218 because future has completed 2024-11-12T18:32:22,406 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-12T18:32:22,406 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1b3e80ef23618aa6d107df2581f000fc, server=9911683f163c,37187,1731436341218 in 174 msec 2024-11-12T18:32:22,408 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-12T18:32:22,408 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1b3e80ef23618aa6d107df2581f000fc, ASSIGN in 333 msec 2024-11-12T18:32:22,409 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T18:32:22,410 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731436342409"}]},"ts":"1731436342409"} 2024-11-12T18:32:22,412 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated 
tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-12T18:32:22,413 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T18:32:22,415 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 366 msec 2024-11-12T18:32:23,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:23,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:24,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:24,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:24,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:24,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:24,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:24,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:24,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:24,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:24,996 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:24,996 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:25,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:25,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:25,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:25,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:25,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:25,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:25,015 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:25,015 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:25,015 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:25,017 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:25,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:25,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:25,521 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-12T18:32:25,522 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:25,547 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:26,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:32:26,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:27,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:27,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:32:27,460 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-12T18:32:27,461 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-12T18:32:28,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:28,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:29,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:29,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:32:29,585 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-12T18:32:29,585 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-12T18:32:29,586 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-12T18:32:30,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:30,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:31,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:31,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:32:32,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38027 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T18:32:32,137 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-12T18:32:32,137 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-12T18:32:32,140 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-12T18:32:32,140 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 2024-11-12T18:32:32,143 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc., hostname=9911683f163c,37187,1731436341218, seqNum=2] 2024-11-12T18:32:32,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:32,155 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1b3e80ef23618aa6d107df2581f000fc 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-12T18:32:32,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/b759cd6ce0634f46bf7acde319f759af is 1080, key is row0001/info:/1731436352144/Put/seqid=0 2024-11-12T18:32:32,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741837_1013 (size=12509) 2024-11-12T18:32:32,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741837_1013 (size=12509) 2024-11-12T18:32:32,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/b759cd6ce0634f46bf7acde319f759af 2024-11-12T18:32:32,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/b759cd6ce0634f46bf7acde319f759af as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/b759cd6ce0634f46bf7acde319f759af 2024-11-12T18:32:32,197 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/b759cd6ce0634f46bf7acde319f759af, entries=7, sequenceid=11, filesize=12.2 K 
2024-11-12T18:32:32,198 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 1b3e80ef23618aa6d107df2581f000fc in 43ms, sequenceid=11, compaction requested=false 2024-11-12T18:32:32,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1b3e80ef23618aa6d107df2581f000fc: 2024-11-12T18:32:32,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:32,199 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1b3e80ef23618aa6d107df2581f000fc 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-12T18:32:32,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/64af6a21b8fa4f2194893c90cfc416f0 is 1080, key is row0008/info:/1731436352156/Put/seqid=0 2024-11-12T18:32:32,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741838_1014 (size=29761) 2024-11-12T18:32:32,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741838_1014 (size=29761) 2024-11-12T18:32:32,211 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/64af6a21b8fa4f2194893c90cfc416f0 2024-11-12T18:32:32,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/64af6a21b8fa4f2194893c90cfc416f0 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/64af6a21b8fa4f2194893c90cfc416f0 2024-11-12T18:32:32,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:32,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:32:32,225 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/64af6a21b8fa4f2194893c90cfc416f0, entries=23, sequenceid=37, filesize=29.1 K 2024-11-12T18:32:32,226 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 1b3e80ef23618aa6d107df2581f000fc in 27ms, sequenceid=37, compaction requested=false 2024-11-12T18:32:32,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1b3e80ef23618aa6d107df2581f000fc: 2024-11-12T18:32:32,227 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-12T18:32:32,227 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:32:32,227 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/64af6a21b8fa4f2194893c90cfc416f0 because midkey is the same as first or last row 2024-11-12T18:32:33,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:33,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:34,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:34,213 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1b3e80ef23618aa6d107df2581f000fc 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-12T18:32:34,217 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/4104bad721b74b1c982979dffd0ee03c is 1080, key is row0031/info:/1731436352201/Put/seqid=0 2024-11-12T18:32:34,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:34,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:34,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741839_1015 (size=12509) 2024-11-12T18:32:34,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741839_1015 (size=12509) 2024-11-12T18:32:34,223 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/4104bad721b74b1c982979dffd0ee03c 2024-11-12T18:32:34,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/4104bad721b74b1c982979dffd0ee03c as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/4104bad721b74b1c982979dffd0ee03c 2024-11-12T18:32:34,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/4104bad721b74b1c982979dffd0ee03c, entries=7, sequenceid=47, filesize=12.2 K 2024-11-12T18:32:34,235 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 1b3e80ef23618aa6d107df2581f000fc in 23ms, sequenceid=47, compaction requested=true 2024-11-12T18:32:34,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1b3e80ef23618aa6d107df2581f000fc: 2024-11-12T18:32:34,235 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-12T18:32:34,235 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:32:34,235 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/64af6a21b8fa4f2194893c90cfc416f0 because midkey is the same as first or last row 2024-11-12T18:32:34,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b3e80ef23618aa6d107df2581f000fc:info, priority=-2147483648, current under compaction store size is 1 2024-11-12T18:32:34,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction 
requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:32:34,236 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T18:32:34,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:34,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1b3e80ef23618aa6d107df2581f000fc 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-12T18:32:34,237 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T18:32:34,237 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1541): 1b3e80ef23618aa6d107df2581f000fc/info is initiating minor compaction (all files) 2024-11-12T18:32:34,237 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1b3e80ef23618aa6d107df2581f000fc/info in TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 2024-11-12T18:32:34,237 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/b759cd6ce0634f46bf7acde319f759af, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/64af6a21b8fa4f2194893c90cfc416f0, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/4104bad721b74b1c982979dffd0ee03c] into tmpdir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp, totalSize=53.5 K 2024-11-12T18:32:34,238 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting b759cd6ce0634f46bf7acde319f759af, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731436352144 2024-11-12T18:32:34,238 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 64af6a21b8fa4f2194893c90cfc416f0, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731436352156 2024-11-12T18:32:34,239 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4104bad721b74b1c982979dffd0ee03c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731436352201 2024-11-12T18:32:34,241 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/530c9d002e824fcda1e25835d33a5f76 is 1080, key is row0038/info:/1731436354213/Put/seqid=0 2024-11-12T18:32:34,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36307 is added to blk_1073741840_1016 (size=21141) 2024-11-12T18:32:34,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741840_1016 (size=21141) 2024-11-12T18:32:34,253 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/530c9d002e824fcda1e25835d33a5f76 2024-11-12T18:32:34,260 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1b3e80ef23618aa6d107df2581f000fc#info#compaction#58 average throughput is 12.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T18:32:34,260 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/da82b8f9d930423c94fc24913c1ec809 is 1080, key is row0001/info:/1731436352144/Put/seqid=0 2024-11-12T18:32:34,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/530c9d002e824fcda1e25835d33a5f76 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/530c9d002e824fcda1e25835d33a5f76 2024-11-12T18:32:34,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741841_1017 (size=44978) 2024-11-12T18:32:34,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741841_1017 (size=44978) 2024-11-12T18:32:34,267 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/530c9d002e824fcda1e25835d33a5f76, entries=15, sequenceid=65, filesize=20.6 K 2024-11-12T18:32:34,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for 1b3e80ef23618aa6d107df2581f000fc in 31ms, sequenceid=65, compaction requested=false 2024-11-12T18:32:34,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1b3e80ef23618aa6d107df2581f000fc: 2024-11-12T18:32:34,269 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.1 K, sizeToCheck=16.0 K 2024-11-12T18:32:34,269 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:32:34,269 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/64af6a21b8fa4f2194893c90cfc416f0 because midkey 
is the same as first or last row 2024-11-12T18:32:34,272 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/da82b8f9d930423c94fc24913c1ec809 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/da82b8f9d930423c94fc24913c1ec809 2024-11-12T18:32:34,279 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1b3e80ef23618aa6d107df2581f000fc/info of 1b3e80ef23618aa6d107df2581f000fc into da82b8f9d930423c94fc24913c1ec809(size=43.9 K), total size for store is 64.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T18:32:34,279 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1b3e80ef23618aa6d107df2581f000fc: 2024-11-12T18:32:34,279 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc., storeName=1b3e80ef23618aa6d107df2581f000fc/info, priority=13, startTime=1731436354235; duration=0sec 2024-11-12T18:32:34,279 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-12T18:32:34,279 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:32:34,279 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/da82b8f9d930423c94fc24913c1ec809 because midkey is the same as first or last row 2024-11-12T18:32:34,279 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-12T18:32:34,279 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:32:34,279 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/da82b8f9d930423c94fc24913c1ec809 because midkey is the same as first or last row 2024-11-12T18:32:34,279 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-12T18:32:34,279 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:32:34,279 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/da82b8f9d930423c94fc24913c1ec809 because midkey is the same as first or last row 2024-11-12T18:32:34,279 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:32:34,279 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b3e80ef23618aa6d107df2581f000fc:info 2024-11-12T18:32:35,088 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-12T18:32:35,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,090 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,090 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,092 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,113 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,113 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,113 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,116 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:35,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:32:35,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:36,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:36,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:32:36,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:36,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1b3e80ef23618aa6d107df2581f000fc 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-12T18:32:36,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/cd536297a17f4464910d62067c14f82c is 1080, key is row0053/info:/1731436354238/Put/seqid=0 2024-11-12T18:32:36,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741842_1018 (size=18987) 2024-11-12T18:32:36,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741842_1018 (size=18987) 2024-11-12T18:32:36,279 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/cd536297a17f4464910d62067c14f82c 2024-11-12T18:32:36,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/cd536297a17f4464910d62067c14f82c as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/cd536297a17f4464910d62067c14f82c 2024-11-12T18:32:36,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/cd536297a17f4464910d62067c14f82c, entries=13, sequenceid=82, filesize=18.5 K 2024-11-12T18:32:36,292 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=16.81 KB/17216 for 1b3e80ef23618aa6d107df2581f000fc in 28ms, sequenceid=82, compaction requested=true 2024-11-12T18:32:36,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1b3e80ef23618aa6d107df2581f000fc: 2024-11-12T18:32:36,292 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-12T18:32:36,292 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:32:36,292 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/da82b8f9d930423c94fc24913c1ec809 because midkey is the same as first or last row 2024-11-12T18:32:36,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b3e80ef23618aa6d107df2581f000fc:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-12T18:32:36,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:32:36,292 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T18:32:36,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:36,293 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1b3e80ef23618aa6d107df2581f000fc 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-12T18:32:36,293 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T18:32:36,293 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1541): 1b3e80ef23618aa6d107df2581f000fc/info is initiating minor compaction (all files) 2024-11-12T18:32:36,294 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1b3e80ef23618aa6d107df2581f000fc/info in TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 2024-11-12T18:32:36,294 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/da82b8f9d930423c94fc24913c1ec809, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/530c9d002e824fcda1e25835d33a5f76, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/cd536297a17f4464910d62067c14f82c] into tmpdir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp, totalSize=83.1 K 2024-11-12T18:32:36,294 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting da82b8f9d930423c94fc24913c1ec809, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731436352144 2024-11-12T18:32:36,295 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 530c9d002e824fcda1e25835d33a5f76, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=65, earliestPutTs=1731436354213 2024-11-12T18:32:36,295 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting cd536297a17f4464910d62067c14f82c, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731436354238 2024-11-12T18:32:36,297 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/3e02853b7f614ff0bf35f89f5cc65681 is 1080, key is 
row0066/info:/1731436356265/Put/seqid=0 2024-11-12T18:32:36,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741843_1019 (size=23299) 2024-11-12T18:32:36,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741843_1019 (size=23299) 2024-11-12T18:32:36,304 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/3e02853b7f614ff0bf35f89f5cc65681 2024-11-12T18:32:36,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/3e02853b7f614ff0bf35f89f5cc65681 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/3e02853b7f614ff0bf35f89f5cc65681 2024-11-12T18:32:36,311 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1b3e80ef23618aa6d107df2581f000fc#info#compaction#61 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T18:32:36,312 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/16033eef92f948cd93e5d67f2a724fb0 is 1080, key is row0001/info:/1731436352144/Put/seqid=0 2024-11-12T18:32:36,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741844_1020 (size=75378) 2024-11-12T18:32:36,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741844_1020 (size=75378) 2024-11-12T18:32:36,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/3e02853b7f614ff0bf35f89f5cc65681, entries=17, sequenceid=102, filesize=22.8 K 2024-11-12T18:32:36,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=1b3e80ef23618aa6d107df2581f000fc, server=9911683f163c,37187,1731436341218 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-12T18:32:36,318 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=12.61 KB/12912 for 1b3e80ef23618aa6d107df2581f000fc in 25ms, sequenceid=102, compaction requested=false 2024-11-12T18:32:36,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1b3e80ef23618aa6d107df2581f000fc: 2024-11-12T18:32:36,318 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=105.9 K, sizeToCheck=16.0 K 2024-11-12T18:32:36,318 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:32:36,318 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/da82b8f9d930423c94fc24913c1ec809 because midkey is the same as first or last row 2024-11-12T18:32:36,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:41368 deadline: 1731436366316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=1b3e80ef23618aa6d107df2581f000fc, server=9911683f163c,37187,1731436341218 2024-11-12T18:32:36,323 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/16033eef92f948cd93e5d67f2a724fb0 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/16033eef92f948cd93e5d67f2a724fb0 2024-11-12T18:32:36,329 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1b3e80ef23618aa6d107df2581f000fc/info of 1b3e80ef23618aa6d107df2581f000fc into 16033eef92f948cd93e5d67f2a724fb0(size=73.6 K), total size for store is 96.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-12T18:32:36,329 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1b3e80ef23618aa6d107df2581f000fc: 2024-11-12T18:32:36,329 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc., storeName=1b3e80ef23618aa6d107df2581f000fc/info, priority=13, startTime=1731436356292; duration=0sec 2024-11-12T18:32:36,329 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=96.4 K, sizeToCheck=16.0 K 2024-11-12T18:32:36,329 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:32:36,329 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=96.4 K, sizeToCheck=16.0 K 2024-11-12T18:32:36,329 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:32:36,329 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=96.4 K, sizeToCheck=16.0 K 2024-11-12T18:32:36,330 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-12T18:32:36,330 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:32:36,330 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:32:36,331 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b3e80ef23618aa6d107df2581f000fc:info 2024-11-12T18:32:36,332 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38027 {}] assignment.AssignmentManager(1355): Split request from 9911683f163c,37187,1731436341218, parent={ENCODED => 1b3e80ef23618aa6d107df2581f000fc, NAME => 'TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-12T18:32:36,338 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38027 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=9911683f163c,37187,1731436341218 2024-11-12T18:32:36,342 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc., hostname=9911683f163c,37187,1731436341218, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc., hostname=9911683f163c,37187,1731436341218, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=1b3e80ef23618aa6d107df2581f000fc, server=9911683f163c,37187,1731436341218 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-12T18:32:36,343 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc., hostname=9911683f163c,37187,1731436341218, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=1b3e80ef23618aa6d107df2581f000fc, server=9911683f163c,37187,1731436341218 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-12T18:32:36,343 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc., hostname=9911683f163c,37187,1731436341218, seqNum=2 because the exception is null or not the one we care about 2024-11-12T18:32:36,345 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38027 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1b3e80ef23618aa6d107df2581f000fc, daughterA=714000faee660033616ec98ef128761f, daughterB=e0d5726950b880e6329801d9c1d23318 2024-11-12T18:32:36,346 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1b3e80ef23618aa6d107df2581f000fc, daughterA=714000faee660033616ec98ef128761f, daughterB=e0d5726950b880e6329801d9c1d23318 2024-11-12T18:32:36,346 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, 
parent=1b3e80ef23618aa6d107df2581f000fc, daughterA=714000faee660033616ec98ef128761f, daughterB=e0d5726950b880e6329801d9c1d23318 2024-11-12T18:32:36,346 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1b3e80ef23618aa6d107df2581f000fc, daughterA=714000faee660033616ec98ef128761f, daughterB=e0d5726950b880e6329801d9c1d23318 2024-11-12T18:32:36,353 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1b3e80ef23618aa6d107df2581f000fc, UNASSIGN}] 2024-11-12T18:32:36,354 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1b3e80ef23618aa6d107df2581f000fc, UNASSIGN 2024-11-12T18:32:36,356 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=1b3e80ef23618aa6d107df2581f000fc, regionState=CLOSING, regionLocation=9911683f163c,37187,1731436341218 2024-11-12T18:32:36,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1b3e80ef23618aa6d107df2581f000fc, UNASSIGN because future has completed 2024-11-12T18:32:36,359 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-12T18:32:36,359 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1b3e80ef23618aa6d107df2581f000fc, server=9911683f163c,37187,1731436341218}] 2024-11-12T18:32:36,516 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:36,516 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-12T18:32:36,517 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 1b3e80ef23618aa6d107df2581f000fc, disabling compactions & flushes 2024-11-12T18:32:36,517 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 2024-11-12T18:32:36,517 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 2024-11-12T18:32:36,517 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 
after waiting 0 ms 2024-11-12T18:32:36,517 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 2024-11-12T18:32:36,517 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 1b3e80ef23618aa6d107df2581f000fc 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-12T18:32:36,522 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/5ac61515ec404d81b8aed4a3a13bd119 is 1080, key is row0083/info:/1731436356294/Put/seqid=0 2024-11-12T18:32:36,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741845_1021 (size=17894) 2024-11-12T18:32:36,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741845_1021 (size=17894) 2024-11-12T18:32:36,528 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/5ac61515ec404d81b8aed4a3a13bd119 2024-11-12T18:32:36,533 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/.tmp/info/5ac61515ec404d81b8aed4a3a13bd119 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/5ac61515ec404d81b8aed4a3a13bd119 2024-11-12T18:32:36,538 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/5ac61515ec404d81b8aed4a3a13bd119, entries=12, sequenceid=118, filesize=17.5 K 2024-11-12T18:32:36,539 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=0 B/0 for 1b3e80ef23618aa6d107df2581f000fc in 22ms, sequenceid=118, compaction requested=true 2024-11-12T18:32:36,540 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/b759cd6ce0634f46bf7acde319f759af, 
hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/64af6a21b8fa4f2194893c90cfc416f0, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/da82b8f9d930423c94fc24913c1ec809, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/4104bad721b74b1c982979dffd0ee03c, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/530c9d002e824fcda1e25835d33a5f76, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/cd536297a17f4464910d62067c14f82c] to archive 2024-11-12T18:32:36,541 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-12T18:32:36,542 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/b759cd6ce0634f46bf7acde319f759af to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/b759cd6ce0634f46bf7acde319f759af 2024-11-12T18:32:36,543 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/64af6a21b8fa4f2194893c90cfc416f0 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/64af6a21b8fa4f2194893c90cfc416f0 2024-11-12T18:32:36,544 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/da82b8f9d930423c94fc24913c1ec809 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/da82b8f9d930423c94fc24913c1ec809 2024-11-12T18:32:36,545 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/4104bad721b74b1c982979dffd0ee03c to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/4104bad721b74b1c982979dffd0ee03c 
2024-11-12T18:32:36,546 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/530c9d002e824fcda1e25835d33a5f76 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/530c9d002e824fcda1e25835d33a5f76 2024-11-12T18:32:36,547 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/cd536297a17f4464910d62067c14f82c to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/cd536297a17f4464910d62067c14f82c 2024-11-12T18:32:36,553 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=1 2024-11-12T18:32:36,554 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 2024-11-12T18:32:36,554 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 1b3e80ef23618aa6d107df2581f000fc: Waiting for close lock at 1731436356517Running coprocessor pre-close hooks at 1731436356517Disabling compacts and flushes for region at 1731436356517Disabling writes for close at 1731436356517Obtaining lock to block concurrent updates at 1731436356517Preparing flush snapshotting stores in 1b3e80ef23618aa6d107df2581f000fc at 1731436356517Finished memstore snapshotting TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc., syncing WAL and waiting on mvcc, flushsize=dataSize=12912, getHeapSize=14064, getOffHeapSize=0, getCellsCount=12 at 1731436356518 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 
at 1731436356518Flushing 1b3e80ef23618aa6d107df2581f000fc/info: creating writer at 1731436356519 (+1 ms)Flushing 1b3e80ef23618aa6d107df2581f000fc/info: appending metadata at 1731436356522 (+3 ms)Flushing 1b3e80ef23618aa6d107df2581f000fc/info: closing flushed file at 1731436356522Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7142c461: reopening flushed file at 1731436356532 (+10 ms)Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=0 B/0 for 1b3e80ef23618aa6d107df2581f000fc in 22ms, sequenceid=118, compaction requested=true at 1731436356539 (+7 ms)Writing region close event to WAL at 1731436356550 (+11 ms)Running coprocessor post-close hooks at 1731436356554 (+4 ms)Closed at 1731436356554 2024-11-12T18:32:36,556 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:36,557 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=1b3e80ef23618aa6d107df2581f000fc, regionState=CLOSED 2024-11-12T18:32:36,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1b3e80ef23618aa6d107df2581f000fc, server=9911683f163c,37187,1731436341218 because future has completed 2024-11-12T18:32:36,562 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-12T18:32:36,562 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 1b3e80ef23618aa6d107df2581f000fc, server=9911683f163c,37187,1731436341218 in 201 msec 2024-11-12T18:32:36,564 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-12T18:32:36,564 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1b3e80ef23618aa6d107df2581f000fc, UNASSIGN in 209 msec 2024-11-12T18:32:36,572 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:36,576 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=1b3e80ef23618aa6d107df2581f000fc, threads=3 2024-11-12T18:32:36,578 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/5ac61515ec404d81b8aed4a3a13bd119 for region: 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:36,578 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/16033eef92f948cd93e5d67f2a724fb0 for region: 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:36,578 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/3e02853b7f614ff0bf35f89f5cc65681 for region: 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:36,588 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/5ac61515ec404d81b8aed4a3a13bd119, top=true 2024-11-12T18:32:36,588 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/3e02853b7f614ff0bf35f89f5cc65681, top=true 2024-11-12T18:32:36,598 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-5ac61515ec404d81b8aed4a3a13bd119 for child: e0d5726950b880e6329801d9c1d23318, parent: 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:36,598 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-3e02853b7f614ff0bf35f89f5cc65681 for child: e0d5726950b880e6329801d9c1d23318, parent: 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:36,598 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/5ac61515ec404d81b8aed4a3a13bd119 for region: 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:36,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741846_1022 (size=27) 2024-11-12T18:32:36,598 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/3e02853b7f614ff0bf35f89f5cc65681 for region: 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:36,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741846_1022 (size=27) 2024-11-12T18:32:36,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741847_1023 (size=27) 2024-11-12T18:32:36,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741847_1023 (size=27) 2024-11-12T18:32:36,609 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/16033eef92f948cd93e5d67f2a724fb0 for region: 1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:32:36,611 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 1b3e80ef23618aa6d107df2581f000fc Daughter A: [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/714000faee660033616ec98ef128761f/info/16033eef92f948cd93e5d67f2a724fb0.1b3e80ef23618aa6d107df2581f000fc] storefiles, Daughter B: [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/16033eef92f948cd93e5d67f2a724fb0.1b3e80ef23618aa6d107df2581f000fc, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-3e02853b7f614ff0bf35f89f5cc65681, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-5ac61515ec404d81b8aed4a3a13bd119] storefiles. 2024-11-12T18:32:36,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741848_1024 (size=71) 2024-11-12T18:32:36,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741848_1024 (size=71) 2024-11-12T18:32:36,621 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:36,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741849_1025 (size=71) 2024-11-12T18:32:36,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741849_1025 (size=71) 2024-11-12T18:32:36,636 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:36,645 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/714000faee660033616ec98ef128761f/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1 2024-11-12T18:32:36,648 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1 2024-11-12T18:32:36,650 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731436356650"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731436356650"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731436356650"}]},"ts":"1731436356650"} 2024-11-12T18:32:36,650 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731436356650"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731436356650"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731436356650"}]},"ts":"1731436356650"} 2024-11-12T18:32:36,651 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731436356650"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731436356650"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731436356650"}]},"ts":"1731436356650"} 2024-11-12T18:32:36,669 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=714000faee660033616ec98ef128761f, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e0d5726950b880e6329801d9c1d23318, ASSIGN}] 2024-11-12T18:32:36,671 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=714000faee660033616ec98ef128761f, ASSIGN 2024-11-12T18:32:36,671 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e0d5726950b880e6329801d9c1d23318, ASSIGN 2024-11-12T18:32:36,672 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e0d5726950b880e6329801d9c1d23318, ASSIGN; state=SPLITTING_NEW, location=9911683f163c,37187,1731436341218; forceNewPlan=false, retain=false 2024-11-12T18:32:36,672 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=714000faee660033616ec98ef128761f, ASSIGN; state=SPLITTING_NEW, location=9911683f163c,37187,1731436341218; forceNewPlan=false, retain=false 2024-11-12T18:32:36,822 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=714000faee660033616ec98ef128761f, regionState=OPENING, regionLocation=9911683f163c,37187,1731436341218 2024-11-12T18:32:36,822 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta 
row=e0d5726950b880e6329801d9c1d23318, regionState=OPENING, regionLocation=9911683f163c,37187,1731436341218 2024-11-12T18:32:36,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e0d5726950b880e6329801d9c1d23318, ASSIGN because future has completed 2024-11-12T18:32:36,826 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure e0d5726950b880e6329801d9c1d23318, server=9911683f163c,37187,1731436341218}] 2024-11-12T18:32:36,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=714000faee660033616ec98ef128761f, ASSIGN because future has completed 2024-11-12T18:32:36,827 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 714000faee660033616ec98ef128761f, server=9911683f163c,37187,1731436341218}] 2024-11-12T18:32:36,982 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f. 2024-11-12T18:32:36,982 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 714000faee660033616ec98ef128761f, NAME => 'TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-12T18:32:36,982 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 714000faee660033616ec98ef128761f 2024-11-12T18:32:36,982 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:32:36,982 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 714000faee660033616ec98ef128761f 2024-11-12T18:32:36,982 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 714000faee660033616ec98ef128761f 2024-11-12T18:32:36,984 INFO [StoreOpener-714000faee660033616ec98ef128761f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 714000faee660033616ec98ef128761f 2024-11-12T18:32:36,984 INFO [StoreOpener-714000faee660033616ec98ef128761f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 714000faee660033616ec98ef128761f columnFamilyName info 2024-11-12T18:32:36,984 DEBUG [StoreOpener-714000faee660033616ec98ef128761f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:36,994 DEBUG [StoreOpener-714000faee660033616ec98ef128761f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/714000faee660033616ec98ef128761f/info/16033eef92f948cd93e5d67f2a724fb0.1b3e80ef23618aa6d107df2581f000fc->hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/16033eef92f948cd93e5d67f2a724fb0-bottom 2024-11-12T18:32:36,995 INFO [StoreOpener-714000faee660033616ec98ef128761f-1 {}] regionserver.HStore(327): Store=714000faee660033616ec98ef128761f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:32:36,995 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 714000faee660033616ec98ef128761f 2024-11-12T18:32:36,996 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/714000faee660033616ec98ef128761f 2024-11-12T18:32:36,997 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/714000faee660033616ec98ef128761f 2024-11-12T18:32:36,998 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 714000faee660033616ec98ef128761f 2024-11-12T18:32:36,998 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 714000faee660033616ec98ef128761f 2024-11-12T18:32:36,999 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 714000faee660033616ec98ef128761f 2024-11-12T18:32:37,000 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 714000faee660033616ec98ef128761f; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782256, jitterRate=-0.0053105950355529785}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 
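The CompactionConfiguration dump above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000) is what drives the minor-compaction selections logged further down. As a rough illustration only, and not the actual ExploringCompactionPolicy code, the size-ratio idea behind that selection can be sketched in plain Java: a candidate set is acceptable when no single file is larger than the combined size of the other files times the ratio.

    // Illustrative sketch of the size-ratio test behind minor compaction selection.
    // The sizes are hypothetical; this is not the HBase implementation.
    public class RatioCheckSketch {
        static boolean filesInRatio(long[] candidateSizes, double ratio) {
            long total = 0;
            for (long s : candidateSizes) {
                total += s;
            }
            for (long s : candidateSizes) {
                // Reject the set if any single file dwarfs the rest of the candidates.
                if (s > (total - s) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            long[] sizes = {70_862L, 40_830L, 9_847L}; // hypothetical store file sizes in bytes
            System.out.println(filesInRatio(sizes, 1.2)); // false: the largest file dominates the set
        }
    }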
2024-11-12T18:32:37,000 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 714000faee660033616ec98ef128761f 2024-11-12T18:32:37,000 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 714000faee660033616ec98ef128761f: Running coprocessor pre-open hook at 1731436356982Writing region info on filesystem at 1731436356982Initializing all the Stores at 1731436356983 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436356983Cleaning up temporary data from old regions at 1731436356998 (+15 ms)Running coprocessor post-open hooks at 1731436357000 (+2 ms)Region opened successfully at 1731436357000 2024-11-12T18:32:37,001 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f., pid=13, masterSystemTime=1731436356978 2024-11-12T18:32:37,001 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 714000faee660033616ec98ef128761f:info, priority=-2147483648, current under compaction store size is 1 2024-11-12T18:32:37,001 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:32:37,001 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-12T18:32:37,002 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f. 2024-11-12T18:32:37,002 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1541): 714000faee660033616ec98ef128761f/info is initiating minor compaction (all files) 2024-11-12T18:32:37,002 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 714000faee660033616ec98ef128761f/info in TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f. 
2024-11-12T18:32:37,002 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/714000faee660033616ec98ef128761f/info/16033eef92f948cd93e5d67f2a724fb0.1b3e80ef23618aa6d107df2581f000fc->hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/16033eef92f948cd93e5d67f2a724fb0-bottom] into tmpdir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/714000faee660033616ec98ef128761f/.tmp, totalSize=73.6 K 2024-11-12T18:32:37,003 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 16033eef92f948cd93e5d67f2a724fb0.1b3e80ef23618aa6d107df2581f000fc, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731436352144 2024-11-12T18:32:37,003 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f. 2024-11-12T18:32:37,003 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f. 2024-11-12T18:32:37,004 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 
2024-11-12T18:32:37,004 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => e0d5726950b880e6329801d9c1d23318, NAME => 'TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-12T18:32:37,004 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling e0d5726950b880e6329801d9c1d23318 2024-11-12T18:32:37,004 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:32:37,004 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for e0d5726950b880e6329801d9c1d23318 2024-11-12T18:32:37,004 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for e0d5726950b880e6329801d9c1d23318 2024-11-12T18:32:37,004 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=714000faee660033616ec98ef128761f, regionState=OPEN, openSeqNum=122, regionLocation=9911683f163c,37187,1731436341218 2024-11-12T18:32:37,005 INFO [StoreOpener-e0d5726950b880e6329801d9c1d23318-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e0d5726950b880e6329801d9c1d23318 2024-11-12T18:32:37,006 INFO [StoreOpener-e0d5726950b880e6329801d9c1d23318-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e0d5726950b880e6329801d9c1d23318 columnFamilyName info 2024-11-12T18:32:37,006 DEBUG [StoreOpener-e0d5726950b880e6329801d9c1d23318-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:32:37,006 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-12T18:32:37,006 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
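The FlushAllLargeStoresPolicy line above states the rule it applies to the meta region: flush only the column families whose memstores exceed the configured lower bound, and if none of them do, flush all of them; the next entry accordingly flushes all 4/4 families of hbase:meta. A minimal sketch of that decision follows; the family names and byte counts are assumptions for illustration, not HBase's actual classes or values.

    // Sketch of the "flush large stores, otherwise flush everything" decision.
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    public class FlushPolicySketch {
        static List<String> familiesToFlush(Map<String, Long> memstoreSizes, long lowerBound) {
            List<String> large = new ArrayList<>();
            for (Map.Entry<String, Long> e : memstoreSizes.entrySet()) {
                if (e.getValue() > lowerBound) {
                    large.add(e.getKey());
                }
            }
            // No family crossed the threshold: flush them all, as the log line reports.
            return large.isEmpty() ? new ArrayList<>(memstoreSizes.keySet()) : large;
        }

        public static void main(String[] args) {
            Map<String, Long> sizes = Map.of("info", 5_034L, "ns", 74L, "table", 122L, "rep_barrier", 0L);
            System.out.println(familiesToFlush(sizes, 16 * 1024L)); // all four families
        }
    }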
2024-11-12T18:32:37,007 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-12T18:32:37,007 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 714000faee660033616ec98ef128761f, server=9911683f163c,37187,1731436341218 because future has completed 2024-11-12T18:32:37,011 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-12T18:32:37,011 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 714000faee660033616ec98ef128761f, server=9911683f163c,37187,1731436341218 in 181 msec 2024-11-12T18:32:37,013 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=714000faee660033616ec98ef128761f, ASSIGN in 342 msec 2024-11-12T18:32:37,019 DEBUG [StoreOpener-e0d5726950b880e6329801d9c1d23318-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/16033eef92f948cd93e5d67f2a724fb0.1b3e80ef23618aa6d107df2581f000fc->hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/16033eef92f948cd93e5d67f2a724fb0-top 2024-11-12T18:32:37,025 DEBUG [StoreOpener-e0d5726950b880e6329801d9c1d23318-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-3e02853b7f614ff0bf35f89f5cc65681 2024-11-12T18:32:37,027 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 714000faee660033616ec98ef128761f#info#compaction#63 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T18:32:37,027 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/.tmp/info/45de82d2082d4103b9a5170ece59e54a is 193, key is TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318./info:regioninfo/1731436356822/Put/seqid=0 2024-11-12T18:32:37,027 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/714000faee660033616ec98ef128761f/.tmp/info/1ccd2fa32a5649b19882fab2753dab6b is 1080, key is row0001/info:/1731436352144/Put/seqid=0 2024-11-12T18:32:37,030 DEBUG [StoreOpener-e0d5726950b880e6329801d9c1d23318-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-5ac61515ec404d81b8aed4a3a13bd119 2024-11-12T18:32:37,030 INFO [StoreOpener-e0d5726950b880e6329801d9c1d23318-1 {}] regionserver.HStore(327): Store=e0d5726950b880e6329801d9c1d23318/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:32:37,030 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for e0d5726950b880e6329801d9c1d23318 2024-11-12T18:32:37,032 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318 2024-11-12T18:32:37,033 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318 2024-11-12T18:32:37,034 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for e0d5726950b880e6329801d9c1d23318 2024-11-12T18:32:37,034 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for e0d5726950b880e6329801d9c1d23318 2024-11-12T18:32:37,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741851_1027 (size=9847) 2024-11-12T18:32:37,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741850_1026 (size=70862) 2024-11-12T18:32:37,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741850_1026 (size=70862) 2024-11-12T18:32:37,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741851_1027 (size=9847) 2024-11-12T18:32:37,035 INFO 
[MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/.tmp/info/45de82d2082d4103b9a5170ece59e54a 2024-11-12T18:32:37,036 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for e0d5726950b880e6329801d9c1d23318 2024-11-12T18:32:37,037 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened e0d5726950b880e6329801d9c1d23318; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=823202, jitterRate=0.046756669878959656}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-12T18:32:37,037 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e0d5726950b880e6329801d9c1d23318 2024-11-12T18:32:37,037 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for e0d5726950b880e6329801d9c1d23318: Running coprocessor pre-open hook at 1731436357004Writing region info on filesystem at 1731436357004Initializing all the Stores at 1731436357005 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436357005Cleaning up temporary data from old regions at 1731436357034 (+29 ms)Running coprocessor post-open hooks at 1731436357037 (+3 ms)Region opened successfully at 1731436357037 2024-11-12T18:32:37,038 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318., pid=12, masterSystemTime=1731436356978 2024-11-12T18:32:37,038 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store e0d5726950b880e6329801d9c1d23318:info, priority=-2147483648, current under compaction store size is 2 2024-11-12T18:32:37,038 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:32:37,038 DEBUG [RS:0;9911683f163c:37187-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T18:32:37,040 INFO [RS:0;9911683f163c:37187-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 
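The store files loaded for daughter region e0d5726950b880e6329801d9c1d23318 above are not physical copies of the parent's data; they come in two shapes visible in the file names: a split reference named <hfile>.<parentEncodedRegion> (read as the "-top"/"-bottom" half) and an HFileLink named <table>=<parentEncodedRegion>-<hfile>. Purely as an aid to reading those names, here is a small parser sketch; the class and method names are invented for illustration and are not HBase's HFileLink API.

    // Decodes the two store-file naming patterns that appear in the log lines above.
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class StoreFileNameSketch {
        private static final Pattern LINK = Pattern.compile("^(.+)=([0-9a-f]+)-([0-9a-f]+)$");
        private static final Pattern REFERENCE = Pattern.compile("^([0-9a-f]+)\\.([0-9a-f]+)$");

        static String describe(String fileName) {
            Matcher link = LINK.matcher(fileName);
            if (link.matches()) {
                return "link: table=" + link.group(1) + ", parentRegion=" + link.group(2)
                        + ", hfile=" + link.group(3);
            }
            Matcher ref = REFERENCE.matcher(fileName);
            if (ref.matches()) {
                return "reference: hfile=" + ref.group(1) + ", parentRegion=" + ref.group(2);
            }
            return "plain hfile: " + fileName;
        }

        public static void main(String[] args) {
            System.out.println(describe(
                "TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-3e02853b7f614ff0bf35f89f5cc65681"));
            System.out.println(describe("16033eef92f948cd93e5d67f2a724fb0.1b3e80ef23618aa6d107df2581f000fc"));
        }
    }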
2024-11-12T18:32:37,040 DEBUG [RS:0;9911683f163c:37187-longCompactions-0 {}] regionserver.HStore(1541): e0d5726950b880e6329801d9c1d23318/info is initiating minor compaction (all files) 2024-11-12T18:32:37,040 INFO [RS:0;9911683f163c:37187-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e0d5726950b880e6329801d9c1d23318/info in TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 2024-11-12T18:32:37,040 INFO [RS:0;9911683f163c:37187-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/16033eef92f948cd93e5d67f2a724fb0.1b3e80ef23618aa6d107df2581f000fc->hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/16033eef92f948cd93e5d67f2a724fb0-top, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-3e02853b7f614ff0bf35f89f5cc65681, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-5ac61515ec404d81b8aed4a3a13bd119] into tmpdir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp, totalSize=113.8 K 2024-11-12T18:32:37,041 DEBUG [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 2024-11-12T18:32:37,041 INFO [RS_OPEN_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 
2024-11-12T18:32:37,041 DEBUG [RS:0;9911683f163c:37187-longCompactions-0 {}] compactions.Compactor(225): Compacting 16033eef92f948cd93e5d67f2a724fb0.1b3e80ef23618aa6d107df2581f000fc, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731436352144 2024-11-12T18:32:37,041 DEBUG [RS:0;9911683f163c:37187-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-3e02853b7f614ff0bf35f89f5cc65681, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731436356265 2024-11-12T18:32:37,042 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/714000faee660033616ec98ef128761f/.tmp/info/1ccd2fa32a5649b19882fab2753dab6b as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/714000faee660033616ec98ef128761f/info/1ccd2fa32a5649b19882fab2753dab6b 2024-11-12T18:32:37,042 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=e0d5726950b880e6329801d9c1d23318, regionState=OPEN, openSeqNum=122, regionLocation=9911683f163c,37187,1731436341218 2024-11-12T18:32:37,042 DEBUG [RS:0;9911683f163c:37187-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-5ac61515ec404d81b8aed4a3a13bd119, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1731436356294 2024-11-12T18:32:37,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure e0d5726950b880e6329801d9c1d23318, server=9911683f163c,37187,1731436341218 because future has completed 2024-11-12T18:32:37,049 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-12T18:32:37,049 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure e0d5726950b880e6329801d9c1d23318, server=9911683f163c,37187,1731436341218 in 221 msec 2024-11-12T18:32:37,050 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 714000faee660033616ec98ef128761f/info of 714000faee660033616ec98ef128761f into 1ccd2fa32a5649b19882fab2753dab6b(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
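The "Committing .../.tmp/info/... as .../info/..." line above for this compaction (and the similar lines for the hbase:meta flush just below) show the same publication pattern: the new HFile is written under the region's .tmp directory and only moved into the column-family directory once it is complete, so readers never observe a partially written file. A minimal sketch of that step against the Hadoop FileSystem API; the class name and paths are placeholders, not the exact HBase code, which performs additional validation.

    // Commit-by-rename: publish a fully written temp file into the store directory.
    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitByRenameSketch {
        static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
            Path dest = new Path(storeDir, tmpFile.getName());
            if (!fs.rename(tmpFile, dest)) {
                throw new IOException("Failed to commit " + tmpFile + " to " + dest);
            }
            return dest;
        }
    }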
2024-11-12T18:32:37,050 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 714000faee660033616ec98ef128761f: 2024-11-12T18:32:37,050 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f., storeName=714000faee660033616ec98ef128761f/info, priority=15, startTime=1731436357001; duration=0sec 2024-11-12T18:32:37,050 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:32:37,050 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 714000faee660033616ec98ef128761f:info 2024-11-12T18:32:37,054 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-12T18:32:37,054 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e0d5726950b880e6329801d9c1d23318, ASSIGN in 380 msec 2024-11-12T18:32:37,056 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1b3e80ef23618aa6d107df2581f000fc, daughterA=714000faee660033616ec98ef128761f, daughterB=e0d5726950b880e6329801d9c1d23318 in 716 msec 2024-11-12T18:32:37,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/.tmp/ns/0e9e334a1932458f80602832b9a574f5 is 43, key is default/ns:d/1731436342009/Put/seqid=0 2024-11-12T18:32:37,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741852_1028 (size=5153) 2024-11-12T18:32:37,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741852_1028 (size=5153) 2024-11-12T18:32:37,068 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/.tmp/ns/0e9e334a1932458f80602832b9a574f5 2024-11-12T18:32:37,074 INFO [RS:0;9911683f163c:37187-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e0d5726950b880e6329801d9c1d23318#info#compaction#66 average throughput is 33.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T18:32:37,074 DEBUG [RS:0;9911683f163c:37187-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/0632e6f0c4c04088a4392d47daa7c6dd is 1080, key is row0062/info:/1731436354256/Put/seqid=0 2024-11-12T18:32:37,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741853_1029 (size=40830) 2024-11-12T18:32:37,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741853_1029 (size=40830) 2024-11-12T18:32:37,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/.tmp/table/751f1ec645e4484d843e2bae96973b8e is 65, key is TestLogRolling-testLogRolling/table:state/1731436342409/Put/seqid=0 2024-11-12T18:32:37,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741854_1030 (size=5340) 2024-11-12T18:32:37,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741854_1030 (size=5340) 2024-11-12T18:32:37,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/.tmp/table/751f1ec645e4484d843e2bae96973b8e 2024-11-12T18:32:37,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/.tmp/info/45de82d2082d4103b9a5170ece59e54a as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/info/45de82d2082d4103b9a5170ece59e54a 2024-11-12T18:32:37,104 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/info/45de82d2082d4103b9a5170ece59e54a, entries=30, sequenceid=17, filesize=9.6 K 2024-11-12T18:32:37,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/.tmp/ns/0e9e334a1932458f80602832b9a574f5 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/ns/0e9e334a1932458f80602832b9a574f5 2024-11-12T18:32:37,110 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/ns/0e9e334a1932458f80602832b9a574f5, entries=2, sequenceid=17, filesize=5.0 K 2024-11-12T18:32:37,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/.tmp/table/751f1ec645e4484d843e2bae96973b8e as 
hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/table/751f1ec645e4484d843e2bae96973b8e 2024-11-12T18:32:37,117 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/table/751f1ec645e4484d843e2bae96973b8e, entries=2, sequenceid=17, filesize=5.2 K 2024-11-12T18:32:37,118 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 111ms, sequenceid=17, compaction requested=false 2024-11-12T18:32:37,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-12T18:32:37,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:37,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:37,488 DEBUG [RS:0;9911683f163c:37187-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/0632e6f0c4c04088a4392d47daa7c6dd as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/0632e6f0c4c04088a4392d47daa7c6dd 2024-11-12T18:32:37,493 INFO [RS:0;9911683f163c:37187-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e0d5726950b880e6329801d9c1d23318/info of e0d5726950b880e6329801d9c1d23318 into 0632e6f0c4c04088a4392d47daa7c6dd(size=39.9 K), total size for store is 39.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
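The repeated "Failed invocation" WARN entries above and below are one retry loop: lease recovery for the leftover WAL files keeps probing whether each file has been closed, and every probe fails with "Filesystem closed" because this test's DFS client has already been shut down. The timestamps (18:32:37,223, 18:32:38,224, 18:32:39,225, 18:32:40,226) show roughly one second between attempts. A generic probe-and-retry sketch in plain Java follows; the interval and attempt limit are assumptions for illustration, not HBase's actual settings.

    // Generic probe-with-retry loop matching the cadence of the WARN lines above.
    import java.util.function.BooleanSupplier;

    public class RecoverLeaseRetrySketch {
        static boolean waitUntilClosed(BooleanSupplier isFileClosed, int maxAttempts, long intervalMillis)
                throws InterruptedException {
            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                try {
                    if (isFileClosed.getAsBoolean()) {
                        return true; // lease recovered, file is closed
                    }
                } catch (RuntimeException e) {
                    // Mirrors the log: the probe itself failed (e.g. "Filesystem closed"); warn and retry.
                    System.err.println("Failed invocation (attempt " + attempt + "): " + e.getMessage());
                }
                Thread.sleep(intervalMillis);
            }
            return false;
        }

        public static void main(String[] args) throws InterruptedException {
            // Simulated probe that always fails, like the already-closed DFSClient in the log.
            boolean closed = waitUntilClosed(
                () -> { throw new IllegalStateException("Filesystem closed"); }, 3, 1000L);
            System.out.println("closed=" + closed);
        }
    }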
2024-11-12T18:32:37,494 DEBUG [RS:0;9911683f163c:37187-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:32:37,494 INFO [RS:0;9911683f163c:37187-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318., storeName=e0d5726950b880e6329801d9c1d23318/info, priority=13, startTime=1731436357038; duration=0sec 2024-11-12T18:32:37,494 DEBUG [RS:0;9911683f163c:37187-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:32:37,494 DEBUG [RS:0;9911683f163c:37187-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e0d5726950b880e6329801d9c1d23318:info 2024-11-12T18:32:38,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:32:38,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:39,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:39,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:32:40,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:40,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:41,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:32:41,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:42,056 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-12T18:32:42,057 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,059 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,059 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,059 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,060 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,082 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,082 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,082 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T18:32:42,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:42,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:43,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:43,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:44,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:44,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:45,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:45,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:46,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:46,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:46,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:41368 deadline: 1731436376396, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. is not online on 9911683f163c,37187,1731436341218 2024-11-12T18:32:46,398 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc., hostname=9911683f163c,37187,1731436341218, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc., hostname=9911683f163c,37187,1731436341218, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. 
is not online on 9911683f163c,37187,1731436341218 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-12T18:32:46,398 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc., hostname=9911683f163c,37187,1731436341218, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc. is not online on 9911683f163c,37187,1731436341218 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-12T18:32:46,398 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731436342046.1b3e80ef23618aa6d107df2581f000fc., hostname=9911683f163c,37187,1731436341218, seqNum=2 from cache 2024-11-12T18:32:47,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:47,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:32:48,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:48,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:49,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:32:49,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:50,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:50,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:32:51,150 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-12T18:32:51,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:51,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:52,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:52,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:53,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:53,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:54,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:54,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:55,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:55,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:56,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:56,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:57,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:57,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:58,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:58,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:59,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:32:59,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:00,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:00,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:01,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:01,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:02,024 INFO [master/9911683f163c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-12T18:33:02,024 INFO [master/9911683f163c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-12T18:33:02,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:02,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:03,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:03,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:04,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:04,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:05,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:05,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:06,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:06,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:06,452 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0095', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318., hostname=9911683f163c,37187,1731436341218, seqNum=122] 2024-11-12T18:33:06,978 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-12T18:33:07,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:07,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:33:08,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:08,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:08,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on e0d5726950b880e6329801d9c1d23318 2024-11-12T18:33:08,466 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0d5726950b880e6329801d9c1d23318 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-12T18:33:08,471 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/d34ed069374d4bc99d2d3aa5d4680dee is 1080, key is row0095/info:/1731436386453/Put/seqid=0 2024-11-12T18:33:08,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741855_1031 (size=12513) 2024-11-12T18:33:08,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741855_1031 (size=12513) 2024-11-12T18:33:08,477 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/d34ed069374d4bc99d2d3aa5d4680dee 2024-11-12T18:33:08,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/d34ed069374d4bc99d2d3aa5d4680dee as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/d34ed069374d4bc99d2d3aa5d4680dee 2024-11-12T18:33:08,487 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/d34ed069374d4bc99d2d3aa5d4680dee, entries=7, sequenceid=132, filesize=12.2 K 2024-11-12T18:33:08,488 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 
KB/15064 for e0d5726950b880e6329801d9c1d23318 in 22ms, sequenceid=132, compaction requested=false 2024-11-12T18:33:08,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:08,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on e0d5726950b880e6329801d9c1d23318 2024-11-12T18:33:08,489 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0d5726950b880e6329801d9c1d23318 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-12T18:33:08,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/7c88bad61eaa4378b4350ed985af80ef is 1080, key is row0102/info:/1731436388467/Put/seqid=0 2024-11-12T18:33:08,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741856_1032 (size=21156) 2024-11-12T18:33:08,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741856_1032 (size=21156) 2024-11-12T18:33:08,521 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=150 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/7c88bad61eaa4378b4350ed985af80ef 2024-11-12T18:33:08,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/7c88bad61eaa4378b4350ed985af80ef as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/7c88bad61eaa4378b4350ed985af80ef 2024-11-12T18:33:08,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/7c88bad61eaa4378b4350ed985af80ef, entries=15, sequenceid=150, filesize=20.7 K 2024-11-12T18:33:08,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for e0d5726950b880e6329801d9c1d23318 in 47ms, sequenceid=150, compaction requested=true 2024-11-12T18:33:08,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:08,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e0d5726950b880e6329801d9c1d23318:info, priority=-2147483648, current under compaction store size is 1 2024-11-12T18:33:08,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:33:08,536 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 
blocking 2024-11-12T18:33:08,538 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74499 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T18:33:08,538 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1541): e0d5726950b880e6329801d9c1d23318/info is initiating minor compaction (all files) 2024-11-12T18:33:08,538 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e0d5726950b880e6329801d9c1d23318/info in TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 2024-11-12T18:33:08,538 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/0632e6f0c4c04088a4392d47daa7c6dd, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/d34ed069374d4bc99d2d3aa5d4680dee, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/7c88bad61eaa4378b4350ed985af80ef] into tmpdir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp, totalSize=72.8 K 2024-11-12T18:33:08,539 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0632e6f0c4c04088a4392d47daa7c6dd, keycount=33, bloomtype=ROW, size=39.9 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1731436354256 2024-11-12T18:33:08,539 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting d34ed069374d4bc99d2d3aa5d4680dee, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731436386453 2024-11-12T18:33:08,539 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7c88bad61eaa4378b4350ed985af80ef, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=150, earliestPutTs=1731436388467 2024-11-12T18:33:08,558 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e0d5726950b880e6329801d9c1d23318#info#compaction#70 average throughput is 28.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T18:33:08,559 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/b919cf0f800449fc9380e7d007c38b9d is 1080, key is row0062/info:/1731436354256/Put/seqid=0 2024-11-12T18:33:08,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741857_1033 (size=64713) 2024-11-12T18:33:08,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741857_1033 (size=64713) 2024-11-12T18:33:08,575 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/b919cf0f800449fc9380e7d007c38b9d as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/b919cf0f800449fc9380e7d007c38b9d 2024-11-12T18:33:08,583 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e0d5726950b880e6329801d9c1d23318/info of e0d5726950b880e6329801d9c1d23318 into b919cf0f800449fc9380e7d007c38b9d(size=63.2 K), total size for store is 63.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T18:33:08,583 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:08,583 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318., storeName=e0d5726950b880e6329801d9c1d23318/info, priority=13, startTime=1731436388536; duration=0sec 2024-11-12T18:33:08,583 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:33:08,583 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e0d5726950b880e6329801d9c1d23318:info 2024-11-12T18:33:09,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:09,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:10,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:10,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:10,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on e0d5726950b880e6329801d9c1d23318 2024-11-12T18:33:10,538 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0d5726950b880e6329801d9c1d23318 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-12T18:33:10,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/e95844c307a64303af97ed00bc200140 is 1080, key is row0117/info:/1731436388490/Put/seqid=0 2024-11-12T18:33:10,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741858_1034 (size=19000) 2024-11-12T18:33:10,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741858_1034 (size=19000) 2024-11-12T18:33:10,548 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/e95844c307a64303af97ed00bc200140 2024-11-12T18:33:10,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/e95844c307a64303af97ed00bc200140 as 
hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/e95844c307a64303af97ed00bc200140 2024-11-12T18:33:10,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/e95844c307a64303af97ed00bc200140, entries=13, sequenceid=167, filesize=18.6 K 2024-11-12T18:33:10,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=13.66 KB/13988 for e0d5726950b880e6329801d9c1d23318 in 22ms, sequenceid=167, compaction requested=false 2024-11-12T18:33:10,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:10,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on e0d5726950b880e6329801d9c1d23318 2024-11-12T18:33:10,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0d5726950b880e6329801d9c1d23318 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-12T18:33:10,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/0af59ab058284ad6ba2249757791be82 is 1080, key is row0130/info:/1731436390539/Put/seqid=0 2024-11-12T18:33:10,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741859_1035 (size=20078) 2024-11-12T18:33:10,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741859_1035 (size=20078) 2024-11-12T18:33:10,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/0af59ab058284ad6ba2249757791be82 2024-11-12T18:33:10,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/0af59ab058284ad6ba2249757791be82 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/0af59ab058284ad6ba2249757791be82 2024-11-12T18:33:10,583 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/0af59ab058284ad6ba2249757791be82, entries=14, sequenceid=184, filesize=19.6 K 2024-11-12T18:33:10,584 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=14.71 KB/15064 for e0d5726950b880e6329801d9c1d23318 in 24ms, sequenceid=184, compaction requested=true 2024-11-12T18:33:10,584 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:10,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e0d5726950b880e6329801d9c1d23318:info, priority=-2147483648, current under compaction store size is 1 2024-11-12T18:33:10,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:33:10,584 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T18:33:10,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on e0d5726950b880e6329801d9c1d23318 2024-11-12T18:33:10,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0d5726950b880e6329801d9c1d23318 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-12T18:33:10,586 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103791 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T18:33:10,586 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1541): e0d5726950b880e6329801d9c1d23318/info is initiating minor compaction (all files) 2024-11-12T18:33:10,586 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e0d5726950b880e6329801d9c1d23318/info in TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 2024-11-12T18:33:10,586 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/b919cf0f800449fc9380e7d007c38b9d, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/e95844c307a64303af97ed00bc200140, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/0af59ab058284ad6ba2249757791be82] into tmpdir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp, totalSize=101.4 K 2024-11-12T18:33:10,586 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting b919cf0f800449fc9380e7d007c38b9d, keycount=55, bloomtype=ROW, size=63.2 K, encoding=NONE, compression=NONE, seqNum=150, earliestPutTs=1731436354256 2024-11-12T18:33:10,587 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting e95844c307a64303af97ed00bc200140, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1731436388490 2024-11-12T18:33:10,587 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0af59ab058284ad6ba2249757791be82, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1731436390539 2024-11-12T18:33:10,590 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/436e4be9d9144bc085534bd23d0622eb is 1080, key is row0144/info:/1731436390561/Put/seqid=0 2024-11-12T18:33:10,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741860_1036 (size=21156) 2024-11-12T18:33:10,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741860_1036 (size=21156) 2024-11-12T18:33:10,599 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/436e4be9d9144bc085534bd23d0622eb 2024-11-12T18:33:10,604 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e0d5726950b880e6329801d9c1d23318#info#compaction#74 average throughput is 42.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T18:33:10,605 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/d63c5d1efee44f4a9cd9879d30019142 is 1080, key is row0062/info:/1731436354256/Put/seqid=0 2024-11-12T18:33:10,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/436e4be9d9144bc085534bd23d0622eb as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/436e4be9d9144bc085534bd23d0622eb 2024-11-12T18:33:10,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741861_1037 (size=93998) 2024-11-12T18:33:10,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741861_1037 (size=93998) 2024-11-12T18:33:10,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/436e4be9d9144bc085534bd23d0622eb, entries=15, sequenceid=202, filesize=20.7 K 2024-11-12T18:33:10,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=2.10 KB/2152 for e0d5726950b880e6329801d9c1d23318 in 30ms, sequenceid=202, compaction requested=false 2024-11-12T18:33:10,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:10,615 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/d63c5d1efee44f4a9cd9879d30019142 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/d63c5d1efee44f4a9cd9879d30019142 2024-11-12T18:33:10,621 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e0d5726950b880e6329801d9c1d23318/info of e0d5726950b880e6329801d9c1d23318 into d63c5d1efee44f4a9cd9879d30019142(size=91.8 K), total size for store is 112.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T18:33:10,621 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:10,621 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318., storeName=e0d5726950b880e6329801d9c1d23318/info, priority=13, startTime=1731436390584; duration=0sec 2024-11-12T18:33:10,621 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:33:10,621 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e0d5726950b880e6329801d9c1d23318:info 2024-11-12T18:33:11,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:11,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:12,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:12,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:12,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on e0d5726950b880e6329801d9c1d23318 2024-11-12T18:33:12,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0d5726950b880e6329801d9c1d23318 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-12T18:33:12,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/cd48f1565c004cb4a8ed57cc98217f0b is 1080, key is row0159/info:/1731436390586/Put/seqid=0 2024-11-12T18:33:12,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741862_1038 (size=12516) 2024-11-12T18:33:12,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741862_1038 (size=12516) 2024-11-12T18:33:12,608 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/cd48f1565c004cb4a8ed57cc98217f0b 2024-11-12T18:33:12,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/cd48f1565c004cb4a8ed57cc98217f0b as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/cd48f1565c004cb4a8ed57cc98217f0b 2024-11-12T18:33:12,618 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/cd48f1565c004cb4a8ed57cc98217f0b, entries=7, sequenceid=213, filesize=12.2 K 2024-11-12T18:33:12,619 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for e0d5726950b880e6329801d9c1d23318 in 22ms, sequenceid=213, compaction requested=true 2024-11-12T18:33:12,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:12,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e0d5726950b880e6329801d9c1d23318:info, priority=-2147483648, current under compaction store size is 1 2024-11-12T18:33:12,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:33:12,619 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T18:33:12,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on e0d5726950b880e6329801d9c1d23318 2024-11-12T18:33:12,620 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0d5726950b880e6329801d9c1d23318 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-12T18:33:12,621 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 127670 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T18:33:12,621 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1541): e0d5726950b880e6329801d9c1d23318/info is initiating minor compaction (all files) 2024-11-12T18:33:12,621 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e0d5726950b880e6329801d9c1d23318/info in TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 2024-11-12T18:33:12,621 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/d63c5d1efee44f4a9cd9879d30019142, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/436e4be9d9144bc085534bd23d0622eb, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/cd48f1565c004cb4a8ed57cc98217f0b] into tmpdir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp, totalSize=124.7 K 2024-11-12T18:33:12,621 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting d63c5d1efee44f4a9cd9879d30019142, keycount=82, bloomtype=ROW, size=91.8 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1731436354256 2024-11-12T18:33:12,622 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 436e4be9d9144bc085534bd23d0622eb, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1731436390561 2024-11-12T18:33:12,622 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting cd48f1565c004cb4a8ed57cc98217f0b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731436390586 2024-11-12T18:33:12,624 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/4c38ecc0ff05408ba2ca9ae324dcfb5e is 1080, key is row0166/info:/1731436392598/Put/seqid=0 2024-11-12T18:33:12,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to 
blk_1073741863_1039 (size=20078) 2024-11-12T18:33:12,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741863_1039 (size=20078) 2024-11-12T18:33:12,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/4c38ecc0ff05408ba2ca9ae324dcfb5e 2024-11-12T18:33:12,635 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e0d5726950b880e6329801d9c1d23318#info#compaction#77 average throughput is 53.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T18:33:12,636 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/99fc00591efc477b87411c8bef54d2d1 is 1080, key is row0062/info:/1731436354256/Put/seqid=0 2024-11-12T18:33:12,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/4c38ecc0ff05408ba2ca9ae324dcfb5e as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/4c38ecc0ff05408ba2ca9ae324dcfb5e 2024-11-12T18:33:12,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741864_1040 (size=117820) 2024-11-12T18:33:12,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741864_1040 (size=117820) 2024-11-12T18:33:12,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/4c38ecc0ff05408ba2ca9ae324dcfb5e, entries=14, sequenceid=230, filesize=19.6 K 2024-11-12T18:33:12,645 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for e0d5726950b880e6329801d9c1d23318 in 25ms, sequenceid=230, compaction requested=false 2024-11-12T18:33:12,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:12,645 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/99fc00591efc477b87411c8bef54d2d1 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/99fc00591efc477b87411c8bef54d2d1 2024-11-12T18:33:12,651 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1337): Completed 
compaction of 3 (all) file(s) in e0d5726950b880e6329801d9c1d23318/info of e0d5726950b880e6329801d9c1d23318 into 99fc00591efc477b87411c8bef54d2d1(size=115.1 K), total size for store is 134.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T18:33:12,651 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:12,651 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318., storeName=e0d5726950b880e6329801d9c1d23318/info, priority=13, startTime=1731436392619; duration=0sec 2024-11-12T18:33:12,651 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:33:12,651 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e0d5726950b880e6329801d9c1d23318:info 2024-11-12T18:33:13,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
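Note on the compaction bookkeeping above: each minor compaction merges the three current store files of e0d5726950b880e6329801d9c1d23318/info, the "totalSize" it reports is simply the byte sum of those inputs as acknowledged in the DataNode block reports, and the reported output size matches the block written for the new file. The standalone snippet below (plain Java, no HBase dependencies; every byte count is copied from this log) reproduces that arithmetic for the 18:33:12 and 18:33:14 rounds.

public class CompactionSizeCheck {

    // HBase prints these sizes as "<x.y> K", i.e. KiB with one decimal place.
    static String asK(long bytes) {
        return String.format(java.util.Locale.ROOT, "%.1f K", bytes / 1024.0);
    }

    public static void main(String[] args) {
        // 18:33:12 round: d63c5d1e... (93998 B) + 436e4be9... (21156 B) + cd48f156... (12516 B)
        System.out.println(asK(93998 + 21156 + 12516)); // 124.7 K -> matches totalSize=124.7 K (127670 bytes selected)
        System.out.println(asK(117820));                 // 115.1 K -> matches the new file 99fc0059...

        // 18:33:14 round: 99fc0059... (117820 B) + 4c38ecc0... (20078 B) + 3e4ae5b9... (20078 B)
        System.out.println(asK(117820 + 20078 + 20078)); // 154.3 K -> matches totalSize=154.3 K (157976 bytes selected)
        System.out.println(asK(148311));                 // 144.8 K -> matches the new file 9f9f6d6c...
    }
}

The merged file comes out a few percent smaller than the sum of its inputs, most likely because per-file overhead (trailers, index and bloom blocks) is no longer duplicated across three files; and the throttle line can report an average above the 50.00 MB/second limit with zero sleeps presumably because these sub-second compactions finish before the pressure-aware controller needs to throttle.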
11 more 2024-11-12T18:33:13,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:14,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:14,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
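The Close-WAL-Writer WARNs repeating roughly once per second in this stretch are lease-recovery probes against two WAL files whose DFS client has already been closed, likely leftover from an earlier mini-cluster in the same JVM (note the different test-data directory and NameNode port, 46d6dd51.../37157, versus the active test's 46a2e43a.../38837). The stack frames show RecoverLeaseFSUtils invoking isFileClosed through reflection, so the IOException("Filesystem closed") raised in DFSClient.checkOpen comes back wrapped in an InvocationTargetException, is logged, and the probe retries. Below is a minimal, self-contained sketch of that failure shape; the ClosedDfsClient class and the path string are hypothetical stand-ins, not the Hadoop API.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveIsFileClosedProbe {

    /** Hypothetical stand-in for a DFS client whose filesystem has already been closed. */
    public static class ClosedDfsClient {
        public boolean isFileClosed(String path) throws IOException {
            // Mirrors the "Caused by" lines above: once the client is closed, every call fails.
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        Object client = new ClosedDfsClient();
        Method isFileClosed = client.getClass().getMethod("isFileClosed", String.class);
        String wal = "hdfs://localhost:37157/.../example-wal"; // placeholder path

        for (int attempt = 1; attempt <= 3; attempt++) {
            try {
                // The reflective call is what puts the Method.invoke / GeneratedMethodAccessor
                // frames into the stack traces above.
                boolean closed = (Boolean) isFileClosed.invoke(client, wal);
                System.out.println("closed=" + closed);
                break;
            } catch (InvocationTargetException e) {
                // The checked IOException arrives wrapped; the real utility logs the wrapper
                // at WARN ("Failed invocation for ...") and simply retries.
                System.out.println("WARN Failed invocation for " + wal + ": " + e.getCause());
                Thread.sleep(1000); // roughly the one-second cadence seen in this log
            }
        }
    }
}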
11 more 2024-11-12T18:33:14,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on e0d5726950b880e6329801d9c1d23318 2024-11-12T18:33:14,647 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0d5726950b880e6329801d9c1d23318 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-12T18:33:14,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/3e4ae5b93636443f908de243565814fb is 1080, key is row0180/info:/1731436392621/Put/seqid=0 2024-11-12T18:33:14,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741865_1041 (size=20078) 2024-11-12T18:33:14,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741865_1041 (size=20078) 2024-11-12T18:33:14,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/3e4ae5b93636443f908de243565814fb 2024-11-12T18:33:14,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/3e4ae5b93636443f908de243565814fb as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/3e4ae5b93636443f908de243565814fb 2024-11-12T18:33:14,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/3e4ae5b93636443f908de243565814fb, entries=14, sequenceid=248, filesize=19.6 K 2024-11-12T18:33:14,670 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=14.71 KB/15064 for e0d5726950b880e6329801d9c1d23318 in 23ms, sequenceid=248, compaction requested=true 2024-11-12T18:33:14,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:14,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e0d5726950b880e6329801d9c1d23318:info, priority=-2147483648, current under compaction store size is 1 2024-11-12T18:33:14,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:33:14,670 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T18:33:14,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on e0d5726950b880e6329801d9c1d23318 2024-11-12T18:33:14,671 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing e0d5726950b880e6329801d9c1d23318 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-12T18:33:14,671 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 157976 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T18:33:14,672 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1541): e0d5726950b880e6329801d9c1d23318/info is initiating minor compaction (all files) 2024-11-12T18:33:14,672 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e0d5726950b880e6329801d9c1d23318/info in TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 2024-11-12T18:33:14,672 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/99fc00591efc477b87411c8bef54d2d1, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/4c38ecc0ff05408ba2ca9ae324dcfb5e, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/3e4ae5b93636443f908de243565814fb] into tmpdir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp, totalSize=154.3 K 2024-11-12T18:33:14,672 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 99fc00591efc477b87411c8bef54d2d1, keycount=104, bloomtype=ROW, size=115.1 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731436354256 2024-11-12T18:33:14,673 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4c38ecc0ff05408ba2ca9ae324dcfb5e, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1731436392598 2024-11-12T18:33:14,673 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3e4ae5b93636443f908de243565814fb, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1731436392621 2024-11-12T18:33:14,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/f880171eeb124c19b5d153f256d99fb8 is 1080, key is row0194/info:/1731436394648/Put/seqid=0 2024-11-12T18:33:14,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741866_1042 (size=21165) 2024-11-12T18:33:14,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741866_1042 (size=21165) 2024-11-12T18:33:14,682 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=266 (bloomFilter=true), 
to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/f880171eeb124c19b5d153f256d99fb8 2024-11-12T18:33:14,686 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e0d5726950b880e6329801d9c1d23318#info#compaction#80 average throughput is 45.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T18:33:14,686 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/9f9f6d6c6ea74909804ce43aa7d63047 is 1080, key is row0062/info:/1731436354256/Put/seqid=0 2024-11-12T18:33:14,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/f880171eeb124c19b5d153f256d99fb8 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/f880171eeb124c19b5d153f256d99fb8 2024-11-12T18:33:14,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741867_1043 (size=148311) 2024-11-12T18:33:14,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741867_1043 (size=148311) 2024-11-12T18:33:14,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/f880171eeb124c19b5d153f256d99fb8, entries=15, sequenceid=266, filesize=20.7 K 2024-11-12T18:33:14,696 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=13.66 KB/13988 for e0d5726950b880e6329801d9c1d23318 in 25ms, sequenceid=266, compaction requested=false 2024-11-12T18:33:14,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:14,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on e0d5726950b880e6329801d9c1d23318 2024-11-12T18:33:14,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0d5726950b880e6329801d9c1d23318 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-12T18:33:14,698 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/9f9f6d6c6ea74909804ce43aa7d63047 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/9f9f6d6c6ea74909804ce43aa7d63047 2024-11-12T18:33:14,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/734ee92cae724500bb3524652a278379 is 1080, key is row0209/info:/1731436394672/Put/seqid=0 2024-11-12T18:33:14,704 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e0d5726950b880e6329801d9c1d23318/info of e0d5726950b880e6329801d9c1d23318 into 9f9f6d6c6ea74909804ce43aa7d63047(size=144.8 K), total size for store is 165.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T18:33:14,704 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:14,704 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318., storeName=e0d5726950b880e6329801d9c1d23318/info, priority=13, startTime=1731436394670; duration=0sec 2024-11-12T18:33:14,704 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:33:14,704 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e0d5726950b880e6329801d9c1d23318:info 2024-11-12T18:33:14,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741868_1044 (size=20092) 2024-11-12T18:33:14,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741868_1044 (size=20092) 2024-11-12T18:33:14,708 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/734ee92cae724500bb3524652a278379 2024-11-12T18:33:14,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/734ee92cae724500bb3524652a278379 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/734ee92cae724500bb3524652a278379 2024-11-12T18:33:14,718 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/734ee92cae724500bb3524652a278379, entries=14, sequenceid=283, filesize=19.6 K 2024-11-12T18:33:14,719 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=2.10 KB/2152 for e0d5726950b880e6329801d9c1d23318 in 22ms, sequenceid=283, compaction requested=true 2024-11-12T18:33:14,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:14,719 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e0d5726950b880e6329801d9c1d23318:info, priority=-2147483648, current under compaction store size is 1 2024-11-12T18:33:14,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:33:14,719 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T18:33:14,720 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 189568 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T18:33:14,721 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1541): e0d5726950b880e6329801d9c1d23318/info is initiating minor compaction (all files) 2024-11-12T18:33:14,721 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e0d5726950b880e6329801d9c1d23318/info in TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 2024-11-12T18:33:14,721 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/9f9f6d6c6ea74909804ce43aa7d63047, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/f880171eeb124c19b5d153f256d99fb8, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/734ee92cae724500bb3524652a278379] into tmpdir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp, totalSize=185.1 K 2024-11-12T18:33:14,721 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9f9f6d6c6ea74909804ce43aa7d63047, keycount=132, bloomtype=ROW, size=144.8 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1731436354256 2024-11-12T18:33:14,722 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting f880171eeb124c19b5d153f256d99fb8, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1731436394648 2024-11-12T18:33:14,722 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 734ee92cae724500bb3524652a278379, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1731436394672 2024-11-12T18:33:14,734 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e0d5726950b880e6329801d9c1d23318#info#compaction#82 average throughput is 55.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T18:33:14,735 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/1341d24d35294b849e4945a10979c460 is 1080, key is row0062/info:/1731436354256/Put/seqid=0 2024-11-12T18:33:14,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741869_1045 (size=179706) 2024-11-12T18:33:14,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741869_1045 (size=179706) 2024-11-12T18:33:14,744 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/1341d24d35294b849e4945a10979c460 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/1341d24d35294b849e4945a10979c460 2024-11-12T18:33:14,750 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e0d5726950b880e6329801d9c1d23318/info of e0d5726950b880e6329801d9c1d23318 into 1341d24d35294b849e4945a10979c460(size=175.5 K), total size for store is 175.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T18:33:14,750 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:14,750 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318., storeName=e0d5726950b880e6329801d9c1d23318/info, priority=13, startTime=1731436394719; duration=0sec 2024-11-12T18:33:14,750 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:33:14,750 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e0d5726950b880e6329801d9c1d23318:info 2024-11-12T18:33:15,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:15,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:16,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:16,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:16,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on e0d5726950b880e6329801d9c1d23318 2024-11-12T18:33:16,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0d5726950b880e6329801d9c1d23318 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-12T18:33:16,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/43fcf09b1fea4a9c99e1088c6595c0a1 is 1080, key is row0223/info:/1731436394698/Put/seqid=0 2024-11-12T18:33:16,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741870_1046 (size=12523) 2024-11-12T18:33:16,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741870_1046 (size=12523) 2024-11-12T18:33:16,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/43fcf09b1fea4a9c99e1088c6595c0a1 2024-11-12T18:33:16,726 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/43fcf09b1fea4a9c99e1088c6595c0a1 as 
hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/43fcf09b1fea4a9c99e1088c6595c0a1 2024-11-12T18:33:16,731 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/43fcf09b1fea4a9c99e1088c6595c0a1, entries=7, sequenceid=295, filesize=12.2 K 2024-11-12T18:33:16,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for e0d5726950b880e6329801d9c1d23318 in 22ms, sequenceid=295, compaction requested=false 2024-11-12T18:33:16,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:16,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37187 {}] regionserver.HRegion(8855): Flush requested on e0d5726950b880e6329801d9c1d23318 2024-11-12T18:33:16,733 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0d5726950b880e6329801d9c1d23318 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-12T18:33:16,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/8ff259f074cd4c5f81cd9028d5fd6d53 is 1080, key is row0230/info:/1731436396710/Put/seqid=0 2024-11-12T18:33:16,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741871_1047 (size=21171) 2024-11-12T18:33:16,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741871_1047 (size=21171) 2024-11-12T18:33:16,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/8ff259f074cd4c5f81cd9028d5fd6d53 2024-11-12T18:33:16,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/8ff259f074cd4c5f81cd9028d5fd6d53 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/8ff259f074cd4c5f81cd9028d5fd6d53 2024-11-12T18:33:16,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/8ff259f074cd4c5f81cd9028d5fd6d53, entries=15, sequenceid=313, filesize=20.7 K 2024-11-12T18:33:16,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for e0d5726950b880e6329801d9c1d23318 in 33ms, sequenceid=313, compaction requested=true 2024-11-12T18:33:16,766 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:16,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e0d5726950b880e6329801d9c1d23318:info, priority=-2147483648, current under compaction store size is 1 2024-11-12T18:33:16,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:33:16,766 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-12T18:33:16,767 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 213400 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-12T18:33:16,767 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1541): e0d5726950b880e6329801d9c1d23318/info is initiating minor compaction (all files) 2024-11-12T18:33:16,768 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e0d5726950b880e6329801d9c1d23318/info in TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 2024-11-12T18:33:16,768 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/1341d24d35294b849e4945a10979c460, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/43fcf09b1fea4a9c99e1088c6595c0a1, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/8ff259f074cd4c5f81cd9028d5fd6d53] into tmpdir=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp, totalSize=208.4 K 2024-11-12T18:33:16,768 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1341d24d35294b849e4945a10979c460, keycount=161, bloomtype=ROW, size=175.5 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1731436354256 2024-11-12T18:33:16,768 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 43fcf09b1fea4a9c99e1088c6595c0a1, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1731436394698 2024-11-12T18:33:16,769 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8ff259f074cd4c5f81cd9028d5fd6d53, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1731436396710 2024-11-12T18:33:16,780 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e0d5726950b880e6329801d9c1d23318#info#compaction#85 average throughput is 62.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-12T18:33:16,781 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/346d5409d0b740418b1a19ff0a6e0958 is 1080, key is row0062/info:/1731436354256/Put/seqid=0 2024-11-12T18:33:16,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741872_1048 (size=203554) 2024-11-12T18:33:16,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741872_1048 (size=203554) 2024-11-12T18:33:16,790 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/346d5409d0b740418b1a19ff0a6e0958 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/346d5409d0b740418b1a19ff0a6e0958 2024-11-12T18:33:16,796 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e0d5726950b880e6329801d9c1d23318/info of e0d5726950b880e6329801d9c1d23318 into 346d5409d0b740418b1a19ff0a6e0958(size=198.8 K), total size for store is 198.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-12T18:33:16,796 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:16,796 INFO [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318., storeName=e0d5726950b880e6329801d9c1d23318/info, priority=13, startTime=1731436396766; duration=0sec 2024-11-12T18:33:16,796 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-12T18:33:16,796 DEBUG [RS:0;9911683f163c:37187-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e0d5726950b880e6329801d9c1d23318:info 2024-11-12T18:33:17,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:17,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:18,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:18,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:18,752 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-12T18:33:18,753 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C37187%2C1731436341218.1731436398753 2024-11-12T18:33:18,759 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:18,759 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:18,759 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:18,759 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:18,759 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:18,760 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/WALs/9911683f163c,37187,1731436341218/9911683f163c%2C37187%2C1731436341218.1731436341602 with entries=308, filesize=307.10 KB; new WAL /user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/WALs/9911683f163c,37187,1731436341218/9911683f163c%2C37187%2C1731436341218.1731436398753 2024-11-12T18:33:18,760 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41125:41125),(127.0.0.1/127.0.0.1:42091:42091)] 2024-11-12T18:33:18,760 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/WALs/9911683f163c,37187,1731436341218/9911683f163c%2C37187%2C1731436341218.1731436341602 is not closed yet, will try archiving it next time 2024-11-12T18:33:18,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741833_1009 (size=314476) 2024-11-12T18:33:18,762 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741833_1009 (size=314476) 2024-11-12T18:33:18,765 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing e0d5726950b880e6329801d9c1d23318 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-12T18:33:18,769 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/17f977d3d3c84562bb3e77cb6cb1634d is 1080, key is row0245/info:/1731436396734/Put/seqid=0 2024-11-12T18:33:18,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741874_1050 (size=17918) 2024-11-12T18:33:18,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741874_1050 (size=17918) 2024-11-12T18:33:18,774 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/17f977d3d3c84562bb3e77cb6cb1634d 2024-11-12T18:33:18,779 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/.tmp/info/17f977d3d3c84562bb3e77cb6cb1634d as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/17f977d3d3c84562bb3e77cb6cb1634d 2024-11-12T18:33:18,783 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/17f977d3d3c84562bb3e77cb6cb1634d, entries=12, sequenceid=329, filesize=17.5 K 2024-11-12T18:33:18,785 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=0 B/0 for e0d5726950b880e6329801d9c1d23318 in 20ms, sequenceid=329, compaction requested=false 2024-11-12T18:33:18,785 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for e0d5726950b880e6329801d9c1d23318: 2024-11-12T18:33:18,785 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 714000faee660033616ec98ef128761f: 2024-11-12T18:33:18,785 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-12T18:33:18,789 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/.tmp/info/1cb330166a344c0d8e3c0344ac99a4cd is 193, key is TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318./info:regioninfo/1731436357042/Put/seqid=0 2024-11-12T18:33:18,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741875_1051 (size=6223) 2024-11-12T18:33:18,794 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741875_1051 (size=6223) 2024-11-12T18:33:18,794 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/.tmp/info/1cb330166a344c0d8e3c0344ac99a4cd 2024-11-12T18:33:18,799 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/.tmp/info/1cb330166a344c0d8e3c0344ac99a4cd as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/info/1cb330166a344c0d8e3c0344ac99a4cd 2024-11-12T18:33:18,804 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/info/1cb330166a344c0d8e3c0344ac99a4cd, entries=5, sequenceid=21, filesize=6.1 K 2024-11-12T18:33:18,805 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 19ms, sequenceid=21, compaction requested=false 2024-11-12T18:33:18,805 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-12T18:33:18,805 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C37187%2C1731436341218.1731436398805 2024-11-12T18:33:18,809 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:18,809 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:18,809 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:18,810 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:18,810 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:18,810 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/WALs/9911683f163c,37187,1731436341218/9911683f163c%2C37187%2C1731436341218.1731436398753 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/WALs/9911683f163c,37187,1731436341218/9911683f163c%2C37187%2C1731436341218.1731436398805 2024-11-12T18:33:18,810 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42091:42091),(127.0.0.1/127.0.0.1:41125:41125)] 2024-11-12T18:33:18,811 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/WALs/9911683f163c,37187,1731436341218/9911683f163c%2C37187%2C1731436341218.1731436398753 is not closed yet, will try archiving it next time 2024-11-12T18:33:18,811 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/WALs/9911683f163c,37187,1731436341218/9911683f163c%2C37187%2C1731436341218.1731436341602 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/oldWALs/9911683f163c%2C37187%2C1731436341218.1731436341602 2024-11-12T18:33:18,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741873_1049 (size=731) 
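The roll-and-archive sequence logged just above (a new WAL writer is created, then the previous file 9911683f163c%2C37187%2C1731436341218.1731436341602 is moved to oldWALs) is driven by the test thread itself. As a rough illustration only, a client can force the same kind of roll through the Admin API; the configuration discovery, host name, port and start code below are placeholder assumptions, not values taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // ServerName is host,port,startcode; these values are hypothetical.
          ServerName rs = ServerName.valueOf("regionserver.example.com", 16020, 1731436341218L);
          // Ask the region server to close its current WAL and open a new one; the old file
          // becomes eligible for archiving to oldWALs once its edits have been flushed.
          admin.rollWALWriter(rs);
        }
      }
    }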
2024-11-12T18:33:18,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741873_1049 (size=731) 2024-11-12T18:33:18,811 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T18:33:18,813 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/WALs/9911683f163c,37187,1731436341218/9911683f163c%2C37187%2C1731436341218.1731436398753 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/oldWALs/9911683f163c%2C37187%2C1731436341218.1731436398753 2024-11-12T18:33:18,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-12T18:33:18,912 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T18:33:18,912 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:33:18,912 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:33:18,912 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:33:18,912 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-12T18:33:18,912 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-12T18:33:18,912 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1967420030, stopped=false 2024-11-12T18:33:18,912 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=9911683f163c,38027,1731436341166 2024-11-12T18:33:18,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:33:18,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:33:18,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:18,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:18,914 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:33:18,915 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
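The call stack above shows where the shutdown originates: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which first closes the shared async connection and then stops the cluster. A minimal sketch of that test lifecycle pattern follows; the class and field names are invented for illustration, and only the utility calls mirror what the stack trace shows.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Brings up ZooKeeper, a mini DFS cluster, the master and a region server.
        util.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // Closes the cluster connection and stops HBase, then the mini DFS cluster,
        // producing "Shutting down minicluster" entries like the ones logged above.
        util.shutdownMiniCluster();
      }
    }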
2024-11-12T18:33:18,915 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:33:18,915 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:33:18,915 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:33:18,915 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:33:18,915 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9911683f163c,37187,1731436341218' ***** 2024-11-12T18:33:18,915 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T18:33:18,916 INFO [RS:0;9911683f163c:37187 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T18:33:18,916 INFO [RS:0;9911683f163c:37187 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T18:33:18,916 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T18:33:18,916 INFO [RS:0;9911683f163c:37187 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T18:33:18,916 INFO [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(3091): Received CLOSE for e0d5726950b880e6329801d9c1d23318 2024-11-12T18:33:18,916 INFO [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(3091): Received CLOSE for 714000faee660033616ec98ef128761f 2024-11-12T18:33:18,916 INFO [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(959): stopping server 9911683f163c,37187,1731436341218 2024-11-12T18:33:18,916 INFO [RS:0;9911683f163c:37187 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:33:18,916 INFO [RS:0;9911683f163c:37187 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;9911683f163c:37187. 
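The ZooKeeper entries above (NodeDeleted on /hbase/running, followed by ZKUtil re-setting a watcher on the now-missing znode) are how each process learns that the cluster has been marked down. Below is a schematic sketch of that watch-on-deletion idea using the plain ZooKeeper client; it is not HBase's ZKWatcher/ZKUtil code, and the connect string simply reuses the quorum address printed in the log.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55721", 30_000, event -> { });
        Watcher shutdownWatcher = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && "/hbase/running".equals(event.getPath())) {
            System.out.println("/hbase/running deleted: cluster shutdown requested");
          }
        };
        // exists() registers the watch whether or not the znode is present, matching the
        // "Set watcher on znode that does not yet exist" lines above.
        zk.exists("/hbase/running", shutdownWatcher);
        Thread.sleep(60_000);  // keep the session open long enough to observe the event in this sketch
        zk.close();
      }
    }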
2024-11-12T18:33:18,916 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e0d5726950b880e6329801d9c1d23318, disabling compactions & flushes 2024-11-12T18:33:18,916 DEBUG [RS:0;9911683f163c:37187 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:33:18,916 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 2024-11-12T18:33:18,916 DEBUG [RS:0;9911683f163c:37187 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:33:18,916 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 2024-11-12T18:33:18,916 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. after waiting 0 ms 2024-11-12T18:33:18,916 INFO [RS:0;9911683f163c:37187 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T18:33:18,916 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 2024-11-12T18:33:18,917 INFO [RS:0;9911683f163c:37187 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T18:33:18,917 INFO [RS:0;9911683f163c:37187 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
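The region close sequence above ("Time limited wait for close lock", the lock acquired after 0 ms, then "Updates disabled") follows a familiar read/write-lock quiesce pattern: mutations share a read lock while close takes the write lock with a bounded wait. The sketch below illustrates only that pattern; it is not HBase's HRegion implementation, and all names are invented for the example.

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class CloseLockSketch {
      private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
      private volatile boolean closed = false;

      public void put(Runnable mutation) {
        closeLock.readLock().lock();            // many writers may proceed concurrently
        try {
          if (closed) {
            throw new IllegalStateException("region is closing");
          }
          mutation.run();
        } finally {
          closeLock.readLock().unlock();
        }
      }

      public boolean close(long waitMillis) throws InterruptedException {
        // Bounded wait mirrors the "Time limited wait for close lock" entries above.
        if (!closeLock.writeLock().tryLock(waitMillis, TimeUnit.MILLISECONDS)) {
          return false;                         // could not quiesce writers in time
        }
        try {
          closed = true;                        // corresponds to "Updates disabled for region"
          return true;
        } finally {
          closeLock.writeLock().unlock();
        }
      }
    }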
2024-11-12T18:33:18,917 INFO [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-12T18:33:18,917 INFO [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-12T18:33:18,917 DEBUG [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(1325): Online Regions={e0d5726950b880e6329801d9c1d23318=TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318., 714000faee660033616ec98ef128761f=TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f., 1588230740=hbase:meta,,1.1588230740} 2024-11-12T18:33:18,917 DEBUG [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 714000faee660033616ec98ef128761f, e0d5726950b880e6329801d9c1d23318 2024-11-12T18:33:18,917 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:33:18,917 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:33:18,917 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:33:18,917 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:33:18,917 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:33:18,917 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/16033eef92f948cd93e5d67f2a724fb0.1b3e80ef23618aa6d107df2581f000fc->hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/16033eef92f948cd93e5d67f2a724fb0-top, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-3e02853b7f614ff0bf35f89f5cc65681, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/0632e6f0c4c04088a4392d47daa7c6dd, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-5ac61515ec404d81b8aed4a3a13bd119, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/d34ed069374d4bc99d2d3aa5d4680dee, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/b919cf0f800449fc9380e7d007c38b9d, 
hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/7c88bad61eaa4378b4350ed985af80ef, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/e95844c307a64303af97ed00bc200140, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/d63c5d1efee44f4a9cd9879d30019142, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/0af59ab058284ad6ba2249757791be82, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/436e4be9d9144bc085534bd23d0622eb, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/99fc00591efc477b87411c8bef54d2d1, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/cd48f1565c004cb4a8ed57cc98217f0b, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/4c38ecc0ff05408ba2ca9ae324dcfb5e, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/9f9f6d6c6ea74909804ce43aa7d63047, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/3e4ae5b93636443f908de243565814fb, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/f880171eeb124c19b5d153f256d99fb8, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/1341d24d35294b849e4945a10979c460, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/734ee92cae724500bb3524652a278379, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/43fcf09b1fea4a9c99e1088c6595c0a1, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/8ff259f074cd4c5f81cd9028d5fd6d53] to archive 2024-11-12T18:33:18,918 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
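The StoreCloser entries above and below record compacted store files being moved out of the region's data directory into the parallel layout under archive/. As a loose illustration of that move (HBase's HFileArchiver adds retries and name-collision handling that this sketch omits), a single file could be archived with the Hadoop FileSystem API; the namenode URI matches the log, while the table, region and file names are placeholders.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveStoreFileSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new URI("hdfs://localhost:38837"), new Configuration());

        // Placeholder paths shaped like the data/ and archive/ layout seen in the log.
        Path storeFile  = new Path("/hbase/data/default/ExampleTable/region1/info/hfile-0001");
        Path archiveDir = new Path("/hbase/archive/data/default/ExampleTable/region1/info");

        fs.mkdirs(archiveDir);                                 // make sure the archive directory exists
        Path target = new Path(archiveDir, storeFile.getName());
        boolean moved = fs.rename(storeFile, target);          // the "archive" step is essentially a rename
        System.out.println(moved ? "archived to " + target : "rename failed for " + storeFile);
        fs.close();
      }
    }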
2024-11-12T18:33:18,920 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/16033eef92f948cd93e5d67f2a724fb0.1b3e80ef23618aa6d107df2581f000fc to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/16033eef92f948cd93e5d67f2a724fb0.1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:33:18,921 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-3e02853b7f614ff0bf35f89f5cc65681 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-3e02853b7f614ff0bf35f89f5cc65681 2024-11-12T18:33:18,921 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-12T18:33:18,922 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T18:33:18,922 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:33:18,922 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436398917Running coprocessor pre-close hooks at 1731436398917Disabling compacts and flushes for region at 1731436398917Disabling writes for close at 1731436398917Writing region close event to WAL at 1731436398918 (+1 ms)Running coprocessor post-close hooks at 1731436398922 (+4 ms)Closed at 1731436398922 2024-11-12T18:33:18,922 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-12T18:33:18,922 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/0632e6f0c4c04088a4392d47daa7c6dd to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/0632e6f0c4c04088a4392d47daa7c6dd 2024-11-12T18:33:18,923 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-5ac61515ec404d81b8aed4a3a13bd119 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/TestLogRolling-testLogRolling=1b3e80ef23618aa6d107df2581f000fc-5ac61515ec404d81b8aed4a3a13bd119 2024-11-12T18:33:18,924 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/d34ed069374d4bc99d2d3aa5d4680dee to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/d34ed069374d4bc99d2d3aa5d4680dee 2024-11-12T18:33:18,925 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/b919cf0f800449fc9380e7d007c38b9d to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/b919cf0f800449fc9380e7d007c38b9d 2024-11-12T18:33:18,926 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/7c88bad61eaa4378b4350ed985af80ef to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/7c88bad61eaa4378b4350ed985af80ef 2024-11-12T18:33:18,927 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/e95844c307a64303af97ed00bc200140 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/e95844c307a64303af97ed00bc200140 2024-11-12T18:33:18,928 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/d63c5d1efee44f4a9cd9879d30019142 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/d63c5d1efee44f4a9cd9879d30019142 2024-11-12T18:33:18,929 
DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/0af59ab058284ad6ba2249757791be82 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/0af59ab058284ad6ba2249757791be82 2024-11-12T18:33:18,930 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/436e4be9d9144bc085534bd23d0622eb to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/436e4be9d9144bc085534bd23d0622eb 2024-11-12T18:33:18,931 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/99fc00591efc477b87411c8bef54d2d1 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/99fc00591efc477b87411c8bef54d2d1 2024-11-12T18:33:18,932 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/cd48f1565c004cb4a8ed57cc98217f0b to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/cd48f1565c004cb4a8ed57cc98217f0b 2024-11-12T18:33:18,933 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/4c38ecc0ff05408ba2ca9ae324dcfb5e to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/4c38ecc0ff05408ba2ca9ae324dcfb5e 2024-11-12T18:33:18,934 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/9f9f6d6c6ea74909804ce43aa7d63047 to 
hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/9f9f6d6c6ea74909804ce43aa7d63047 2024-11-12T18:33:18,935 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/3e4ae5b93636443f908de243565814fb to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/3e4ae5b93636443f908de243565814fb 2024-11-12T18:33:18,936 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/f880171eeb124c19b5d153f256d99fb8 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/f880171eeb124c19b5d153f256d99fb8 2024-11-12T18:33:18,937 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/1341d24d35294b849e4945a10979c460 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/1341d24d35294b849e4945a10979c460 2024-11-12T18:33:18,938 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/734ee92cae724500bb3524652a278379 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/734ee92cae724500bb3524652a278379 2024-11-12T18:33:18,939 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/43fcf09b1fea4a9c99e1088c6595c0a1 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/43fcf09b1fea4a9c99e1088c6595c0a1 2024-11-12T18:33:18,940 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/8ff259f074cd4c5f81cd9028d5fd6d53 to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/info/8ff259f074cd4c5f81cd9028d5fd6d53 2024-11-12T18:33:18,940 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=9911683f163c:38027 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-12T18:33:18,941 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [0632e6f0c4c04088a4392d47daa7c6dd=40830, d34ed069374d4bc99d2d3aa5d4680dee=12513, b919cf0f800449fc9380e7d007c38b9d=64713, 7c88bad61eaa4378b4350ed985af80ef=21156, e95844c307a64303af97ed00bc200140=19000, d63c5d1efee44f4a9cd9879d30019142=93998, 0af59ab058284ad6ba2249757791be82=20078, 436e4be9d9144bc085534bd23d0622eb=21156, 99fc00591efc477b87411c8bef54d2d1=117820, cd48f1565c004cb4a8ed57cc98217f0b=12516, 4c38ecc0ff05408ba2ca9ae324dcfb5e=20078, 9f9f6d6c6ea74909804ce43aa7d63047=148311, 3e4ae5b93636443f908de243565814fb=20078, f880171eeb124c19b5d153f256d99fb8=21165, 1341d24d35294b849e4945a10979c460=179706, 734ee92cae724500bb3524652a278379=20092, 43fcf09b1fea4a9c99e1088c6595c0a1=12523, 8ff259f074cd4c5f81cd9028d5fd6d53=21171] 2024-11-12T18:33:18,944 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/e0d5726950b880e6329801d9c1d23318/recovered.edits/332.seqid, newMaxSeqId=332, maxSeqId=121 2024-11-12T18:33:18,945 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 2024-11-12T18:33:18,945 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e0d5726950b880e6329801d9c1d23318: Waiting for close lock at 1731436398916Running coprocessor pre-close hooks at 1731436398916Disabling compacts and flushes for region at 1731436398916Disabling writes for close at 1731436398916Writing region close event to WAL at 1731436398941 (+25 ms)Running coprocessor post-close hooks at 1731436398945 (+4 ms)Closed at 1731436398945 2024-11-12T18:33:18,945 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731436356338.e0d5726950b880e6329801d9c1d23318. 
2024-11-12T18:33:18,945 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 714000faee660033616ec98ef128761f, disabling compactions & flushes 2024-11-12T18:33:18,945 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f. 2024-11-12T18:33:18,945 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f. 2024-11-12T18:33:18,945 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f. after waiting 0 ms 2024-11-12T18:33:18,945 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f. 2024-11-12T18:33:18,946 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/714000faee660033616ec98ef128761f/info/16033eef92f948cd93e5d67f2a724fb0.1b3e80ef23618aa6d107df2581f000fc->hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/1b3e80ef23618aa6d107df2581f000fc/info/16033eef92f948cd93e5d67f2a724fb0-bottom] to archive 2024-11-12T18:33:18,946 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-12T18:33:18,948 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/714000faee660033616ec98ef128761f/info/16033eef92f948cd93e5d67f2a724fb0.1b3e80ef23618aa6d107df2581f000fc to hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/archive/data/default/TestLogRolling-testLogRolling/714000faee660033616ec98ef128761f/info/16033eef92f948cd93e5d67f2a724fb0.1b3e80ef23618aa6d107df2581f000fc 2024-11-12T18:33:18,948 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-12T18:33:18,951 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/data/default/TestLogRolling-testLogRolling/714000faee660033616ec98ef128761f/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=121 2024-11-12T18:33:18,952 INFO [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f. 
2024-11-12T18:33:18,952 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 714000faee660033616ec98ef128761f: Waiting for close lock at 1731436398945Running coprocessor pre-close hooks at 1731436398945Disabling compacts and flushes for region at 1731436398945Disabling writes for close at 1731436398945Writing region close event to WAL at 1731436398948 (+3 ms)Running coprocessor post-close hooks at 1731436398952 (+4 ms)Closed at 1731436398952 2024-11-12T18:33:18,952 DEBUG [RS_CLOSE_REGION-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731436356338.714000faee660033616ec98ef128761f. 2024-11-12T18:33:19,117 INFO [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(976): stopping server 9911683f163c,37187,1731436341218; all regions closed. 2024-11-12T18:33:19,118 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:19,118 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:19,118 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:19,118 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:19,118 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:19,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741834_1010 (size=8107) 2024-11-12T18:33:19,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741834_1010 (size=8107) 2024-11-12T18:33:19,122 DEBUG [RS:0;9911683f163c:37187 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/oldWALs 2024-11-12T18:33:19,123 INFO [RS:0;9911683f163c:37187 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C37187%2C1731436341218.meta:.meta(num 1731436341971) 2024-11-12T18:33:19,123 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:19,123 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:19,123 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:19,123 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:19,123 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:19,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741876_1052 (size=778) 2024-11-12T18:33:19,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741876_1052 (size=778) 2024-11-12T18:33:19,127 DEBUG [RS:0;9911683f163c:37187 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/oldWALs 2024-11-12T18:33:19,127 INFO [RS:0;9911683f163c:37187 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C37187%2C1731436341218:(num 1731436398805) 2024-11-12T18:33:19,127 DEBUG [RS:0;9911683f163c:37187 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:33:19,127 INFO [RS:0;9911683f163c:37187 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:33:19,127 INFO [RS:0;9911683f163c:37187 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:33:19,127 INFO [RS:0;9911683f163c:37187 {}] hbase.ChoreService(370): Chore service for: 
regionserver/9911683f163c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T18:33:19,128 INFO [RS:0;9911683f163c:37187 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:33:19,128 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T18:33:19,128 INFO [RS:0;9911683f163c:37187 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37187 2024-11-12T18:33:19,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9911683f163c,37187,1731436341218 2024-11-12T18:33:19,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:33:19,130 INFO [RS:0;9911683f163c:37187 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:33:19,131 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9911683f163c,37187,1731436341218] 2024-11-12T18:33:19,132 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9911683f163c,37187,1731436341218 already deleted, retry=false 2024-11-12T18:33:19,132 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9911683f163c,37187,1731436341218 expired; onlineServers=0 2024-11-12T18:33:19,133 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '9911683f163c,38027,1731436341166' ***** 2024-11-12T18:33:19,133 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T18:33:19,133 INFO [M:0;9911683f163c:38027 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:33:19,133 INFO [M:0;9911683f163c:38027 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:33:19,133 DEBUG [M:0;9911683f163c:38027 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T18:33:19,133 DEBUG [M:0;9911683f163c:38027 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T18:33:19,133 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-12T18:33:19,133 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436341375 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436341375,5,FailOnTimeoutGroup] 2024-11-12T18:33:19,133 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436341375 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436341375,5,FailOnTimeoutGroup] 2024-11-12T18:33:19,133 INFO [M:0;9911683f163c:38027 {}] hbase.ChoreService(370): Chore service for: master/9911683f163c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-12T18:33:19,133 INFO [M:0;9911683f163c:38027 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:33:19,133 DEBUG [M:0;9911683f163c:38027 {}] master.HMaster(1795): Stopping service threads 2024-11-12T18:33:19,133 INFO [M:0;9911683f163c:38027 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T18:33:19,133 INFO [M:0;9911683f163c:38027 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:33:19,134 INFO [M:0;9911683f163c:38027 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T18:33:19,134 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-12T18:33:19,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T18:33:19,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:19,134 DEBUG [M:0;9911683f163c:38027 {}] zookeeper.ZKUtil(347): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-12T18:33:19,134 WARN [M:0;9911683f163c:38027 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-12T18:33:19,135 INFO [M:0;9911683f163c:38027 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/.lastflushedseqids 2024-11-12T18:33:19,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741877_1053 (size=228) 2024-11-12T18:33:19,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741877_1053 (size=228) 2024-11-12T18:33:19,140 INFO [M:0;9911683f163c:38027 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-12T18:33:19,141 INFO [M:0;9911683f163c:38027 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-12T18:33:19,141 DEBUG [M:0;9911683f163c:38027 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:33:19,141 INFO [M:0;9911683f163c:38027 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:33:19,141 DEBUG [M:0;9911683f163c:38027 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:33:19,141 DEBUG [M:0;9911683f163c:38027 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:33:19,141 DEBUG [M:0;9911683f163c:38027 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:33:19,141 INFO [M:0;9911683f163c:38027 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-12T18:33:19,157 DEBUG [M:0;9911683f163c:38027 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ad35ea768db84a67817db01980e8e81e is 82, key is hbase:meta,,1/info:regioninfo/1731436341994/Put/seqid=0 2024-11-12T18:33:19,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741878_1054 (size=5672) 2024-11-12T18:33:19,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741878_1054 (size=5672) 2024-11-12T18:33:19,161 INFO [M:0;9911683f163c:38027 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ad35ea768db84a67817db01980e8e81e 2024-11-12T18:33:19,188 DEBUG [M:0;9911683f163c:38027 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e42c4629a8614e0396913670cc7ec5c4 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731436342415/Put/seqid=0 2024-11-12T18:33:19,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741879_1055 (size=7090) 2024-11-12T18:33:19,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741879_1055 (size=7090) 2024-11-12T18:33:19,193 INFO [M:0;9911683f163c:38027 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e42c4629a8614e0396913670cc7ec5c4 2024-11-12T18:33:19,197 INFO [M:0;9911683f163c:38027 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e42c4629a8614e0396913670cc7ec5c4 2024-11-12T18:33:19,212 DEBUG [M:0;9911683f163c:38027 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2d4351255d994071af50e845b1ff1408 is 69, key is 9911683f163c,37187,1731436341218/rs:state/1731436341452/Put/seqid=0 
2024-11-12T18:33:19,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741880_1056 (size=5156) 2024-11-12T18:33:19,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741880_1056 (size=5156) 2024-11-12T18:33:19,217 INFO [M:0;9911683f163c:38027 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2d4351255d994071af50e845b1ff1408 2024-11-12T18:33:19,232 INFO [RS:0;9911683f163c:37187 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:33:19,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:33:19,232 INFO [RS:0;9911683f163c:37187 {}] regionserver.HRegionServer(1031): Exiting; stopping=9911683f163c,37187,1731436341218; zookeeper connection closed. 2024-11-12T18:33:19,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37187-0x1003543f23e0001, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:33:19,232 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4fed3a3d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4fed3a3d 2024-11-12T18:33:19,232 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-12T18:33:19,236 DEBUG [M:0;9911683f163c:38027 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c6aa3dc4dae14f9f9b6a4dfa37b3834c is 52, key is load_balancer_on/state:d/1731436342043/Put/seqid=0 2024-11-12T18:33:19,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741881_1057 (size=5056) 2024-11-12T18:33:19,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741881_1057 (size=5056) 2024-11-12T18:33:19,241 INFO [M:0;9911683f163c:38027 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c6aa3dc4dae14f9f9b6a4dfa37b3834c 2024-11-12T18:33:19,246 DEBUG [M:0;9911683f163c:38027 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ad35ea768db84a67817db01980e8e81e as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ad35ea768db84a67817db01980e8e81e 2024-11-12T18:33:19,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:19,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:19,250 INFO [M:0;9911683f163c:38027 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ad35ea768db84a67817db01980e8e81e, entries=8, sequenceid=125, filesize=5.5 K 2024-11-12T18:33:19,251 DEBUG [M:0;9911683f163c:38027 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e42c4629a8614e0396913670cc7ec5c4 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e42c4629a8614e0396913670cc7ec5c4 2024-11-12T18:33:19,256 INFO [M:0;9911683f163c:38027 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e42c4629a8614e0396913670cc7ec5c4 2024-11-12T18:33:19,256 INFO [M:0;9911683f163c:38027 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e42c4629a8614e0396913670cc7ec5c4, entries=13, sequenceid=125, filesize=6.9 K 2024-11-12T18:33:19,257 DEBUG [M:0;9911683f163c:38027 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2d4351255d994071af50e845b1ff1408 as hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2d4351255d994071af50e845b1ff1408 2024-11-12T18:33:19,261 INFO [M:0;9911683f163c:38027 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2d4351255d994071af50e845b1ff1408, entries=1, sequenceid=125, filesize=5.0 K 2024-11-12T18:33:19,262 DEBUG [M:0;9911683f163c:38027 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c6aa3dc4dae14f9f9b6a4dfa37b3834c as 
hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c6aa3dc4dae14f9f9b6a4dfa37b3834c 2024-11-12T18:33:19,266 INFO [M:0;9911683f163c:38027 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38837/user/jenkins/test-data/46a2e43a-e75f-32a9-5046-385c79537110/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c6aa3dc4dae14f9f9b6a4dfa37b3834c, entries=1, sequenceid=125, filesize=4.9 K 2024-11-12T18:33:19,267 INFO [M:0;9911683f163c:38027 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=125, compaction requested=false 2024-11-12T18:33:19,269 INFO [M:0;9911683f163c:38027 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:33:19,269 DEBUG [M:0;9911683f163c:38027 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436399141Disabling compacts and flushes for region at 1731436399141Disabling writes for close at 1731436399141Obtaining lock to block concurrent updates at 1731436399141Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731436399141Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1731436399141Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731436399142 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731436399142Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731436399156 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731436399156Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731436399166 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731436399187 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731436399187Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731436399197 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731436399211 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731436399211Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731436399221 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731436399235 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731436399235Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79bf6c8d: reopening flushed file at 1731436399245 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8932198: reopening flushed file at 1731436399251 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5af371ad: reopening flushed file at 1731436399256 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77e85802: reopening flushed file at 1731436399261 (+5 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=125, compaction requested=false at 1731436399267 (+6 ms)Writing region close event to WAL at 1731436399269 (+2 ms)Closed at 1731436399269 2024-11-12T18:33:19,269 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:19,269 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:19,270 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:19,270 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:19,270 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:19,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38811 is added to blk_1073741830_1006 (size=61320) 2024-11-12T18:33:19,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36307 is added to blk_1073741830_1006 (size=61320) 2024-11-12T18:33:19,273 INFO [M:0;9911683f163c:38027 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-12T18:33:19,273 INFO [M:0;9911683f163c:38027 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38027 2024-11-12T18:33:19,273 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T18:33:19,273 INFO [M:0;9911683f163c:38027 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:33:19,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:33:19,375 INFO [M:0;9911683f163c:38027 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:33:19,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38027-0x1003543f23e0000, quorum=127.0.0.1:55721, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:33:19,378 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9c38101{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:33:19,378 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b1137c4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:33:19,378 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:33:19,378 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1abf8fe3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:33:19,378 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@738488a9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/hadoop.log.dir/,STOPPED} 2024-11-12T18:33:19,380 WARN [BP-8344189-172.17.0.3-1731436340498 heartbeating to localhost/127.0.0.1:38837 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:33:19,380 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T18:33:19,380 WARN [BP-8344189-172.17.0.3-1731436340498 heartbeating to localhost/127.0.0.1:38837 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-8344189-172.17.0.3-1731436340498 (Datanode Uuid 38bc28a8-4c38-4a4a-bca6-91cb8af14694) service to localhost/127.0.0.1:38837 2024-11-12T18:33:19,380 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:33:19,381 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/cluster_ac05d1ae-a92b-0c6a-4208-0cf67ca7ffce/data/data3/current/BP-8344189-172.17.0.3-1731436340498 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:33:19,381 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/cluster_ac05d1ae-a92b-0c6a-4208-0cf67ca7ffce/data/data4/current/BP-8344189-172.17.0.3-1731436340498 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:33:19,381 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:33:19,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@16c2050e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:33:19,383 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@391d7ae4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:33:19,383 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:33:19,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e62dbc1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:33:19,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e1bddd6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/hadoop.log.dir/,STOPPED} 2024-11-12T18:33:19,385 WARN [BP-8344189-172.17.0.3-1731436340498 heartbeating to localhost/127.0.0.1:38837 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:33:19,385 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T18:33:19,385 WARN [BP-8344189-172.17.0.3-1731436340498 heartbeating to localhost/127.0.0.1:38837 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-8344189-172.17.0.3-1731436340498 (Datanode Uuid 53fb17a5-9ac2-430a-aa14-7f7814cd5b61) service to localhost/127.0.0.1:38837 2024-11-12T18:33:19,385 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:33:19,386 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/cluster_ac05d1ae-a92b-0c6a-4208-0cf67ca7ffce/data/data1/current/BP-8344189-172.17.0.3-1731436340498 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:33:19,386 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/cluster_ac05d1ae-a92b-0c6a-4208-0cf67ca7ffce/data/data2/current/BP-8344189-172.17.0.3-1731436340498 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:33:19,386 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:33:19,392 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a5cc73{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:33:19,393 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d00cf8f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:33:19,393 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:33:19,393 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f4c5c16{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:33:19,393 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@580f3be3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/hadoop.log.dir/,STOPPED} 2024-11-12T18:33:19,400 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-12T18:33:19,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-12T18:33:19,440 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 206) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38837 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:38837 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38837 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38837 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38837 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:38837 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:38837 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:38837 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=44 (was 91), ProcessCount=11 (was 11), AvailableMemoryMB=6291 (was 6351) 2024-11-12T18:33:19,450 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=44, ProcessCount=11, AvailableMemoryMB=6291 2024-11-12T18:33:19,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-12T18:33:19,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/hadoop.log.dir so I do NOT create it in target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1 2024-11-12T18:33:19,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492874b9-8416-15c7-e6e5-8735fb57c248/hadoop.tmp.dir so I do NOT create it in target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1 2024-11-12T18:33:19,451 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/cluster_dd51cd91-0d24-fd5d-5373-e2bd48afe9fe, deleteOnExit=true 2024-11-12T18:33:19,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-12T18:33:19,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/test.cache.data in system properties and HBase conf 2024-11-12T18:33:19,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/hadoop.tmp.dir in system properties and HBase conf 2024-11-12T18:33:19,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/hadoop.log.dir in system properties and HBase conf 2024-11-12T18:33:19,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-12T18:33:19,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-12T18:33:19,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-12T18:33:19,452 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-12T18:33:19,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:33:19,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-12T18:33:19,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-12T18:33:19,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:33:19,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-12T18:33:19,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-12T18:33:19,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T18:33:19,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:33:19,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-12T18:33:19,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/nfs.dump.dir in system properties and HBase conf 2024-11-12T18:33:19,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/java.io.tmpdir in system properties and HBase conf 2024-11-12T18:33:19,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T18:33:19,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-12T18:33:19,454 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-12T18:33:19,468 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-12T18:33:19,469 INFO [regionserver/9911683f163c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:33:19,524 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:33:19,528 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:33:19,536 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:33:19,537 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:33:19,537 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:33:19,538 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:33:19,538 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b67b6af{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:33:19,539 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e982213{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:33:19,585 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T18:33:19,585 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-12T18:33:19,586 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-12T18:33:19,586 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-12T18:33:19,660 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7667b0c0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/java.io.tmpdir/jetty-localhost-39447-hadoop-hdfs-3_4_1-tests_jar-_-any-596284856929752794/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:33:19,661 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@319a0a77{HTTP/1.1, (http/1.1)}{localhost:39447} 2024-11-12T18:33:19,661 INFO [Time-limited test {}] server.Server(415): Started @295423ms 2024-11-12T18:33:19,674 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-12T18:33:19,738 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:33:19,740 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:33:19,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:33:19,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:33:19,741 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:33:19,741 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@382146a9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:33:19,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@470bba0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:33:19,856 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5eb115e2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/java.io.tmpdir/jetty-localhost-34055-hadoop-hdfs-3_4_1-tests_jar-_-any-4702353500039152664/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:33:19,856 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3faa96fd{HTTP/1.1, (http/1.1)}{localhost:34055} 2024-11-12T18:33:19,856 INFO [Time-limited test {}] server.Server(415): Started @295618ms 2024-11-12T18:33:19,858 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T18:33:19,887 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T18:33:19,890 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T18:33:19,891 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T18:33:19,891 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T18:33:19,891 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T18:33:19,891 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7923b539{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/hadoop.log.dir/,AVAILABLE} 2024-11-12T18:33:19,891 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e50120a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T18:33:19,947 WARN [Thread-2453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/cluster_dd51cd91-0d24-fd5d-5373-e2bd48afe9fe/data/data2/current/BP-211007062-172.17.0.3-1731436399474/current, will proceed with Du for space computation calculation, 2024-11-12T18:33:19,947 WARN [Thread-2452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/cluster_dd51cd91-0d24-fd5d-5373-e2bd48afe9fe/data/data1/current/BP-211007062-172.17.0.3-1731436399474/current, will proceed with Du for space computation calculation, 2024-11-12T18:33:19,967 WARN [Thread-2431 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T18:33:19,970 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbb4c1d3f79297c2 with lease ID 0x647f550798f827c8: Processing first storage report for DS-d41cdb18-9bab-4477-92f0-1e376c616a2e from datanode DatanodeRegistration(127.0.0.1:39169, datanodeUuid=8a5e2625-b93a-4d0c-ba5f-909f155ce126, infoPort=35545, infoSecurePort=0, ipcPort=40829, storageInfo=lv=-57;cid=testClusterID;nsid=307411234;c=1731436399474) 2024-11-12T18:33:19,970 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb4c1d3f79297c2 with lease ID 0x647f550798f827c8: from storage DS-d41cdb18-9bab-4477-92f0-1e376c616a2e node DatanodeRegistration(127.0.0.1:39169, datanodeUuid=8a5e2625-b93a-4d0c-ba5f-909f155ce126, infoPort=35545, infoSecurePort=0, ipcPort=40829, storageInfo=lv=-57;cid=testClusterID;nsid=307411234;c=1731436399474), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-12T18:33:19,970 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbb4c1d3f79297c2 with lease ID 0x647f550798f827c8: Processing first storage report for DS-2d4443eb-05e6-4ea1-b483-ba8648496fa0 from datanode DatanodeRegistration(127.0.0.1:39169, datanodeUuid=8a5e2625-b93a-4d0c-ba5f-909f155ce126, infoPort=35545, infoSecurePort=0, ipcPort=40829, storageInfo=lv=-57;cid=testClusterID;nsid=307411234;c=1731436399474) 2024-11-12T18:33:19,970 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb4c1d3f79297c2 with lease ID 0x647f550798f827c8: from storage DS-2d4443eb-05e6-4ea1-b483-ba8648496fa0 node DatanodeRegistration(127.0.0.1:39169, datanodeUuid=8a5e2625-b93a-4d0c-ba5f-909f155ce126, infoPort=35545, infoSecurePort=0, ipcPort=40829, storageInfo=lv=-57;cid=testClusterID;nsid=307411234;c=1731436399474), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:33:20,006 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@50573d5f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/java.io.tmpdir/jetty-localhost-46247-hadoop-hdfs-3_4_1-tests_jar-_-any-7941322981197815640/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:33:20,007 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@374ad87b{HTTP/1.1, (http/1.1)}{localhost:46247} 2024-11-12T18:33:20,007 INFO [Time-limited test {}] server.Server(415): Started @295769ms 2024-11-12T18:33:20,008 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-12T18:33:20,093 WARN [Thread-2478 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/cluster_dd51cd91-0d24-fd5d-5373-e2bd48afe9fe/data/data3/current/BP-211007062-172.17.0.3-1731436399474/current, will proceed with Du for space computation calculation, 2024-11-12T18:33:20,093 WARN [Thread-2479 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/cluster_dd51cd91-0d24-fd5d-5373-e2bd48afe9fe/data/data4/current/BP-211007062-172.17.0.3-1731436399474/current, will proceed with Du for space computation calculation, 2024-11-12T18:33:20,117 WARN [Thread-2467 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T18:33:20,119 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25758a35c8172040 with lease ID 0x647f550798f827c9: Processing first storage report for DS-599edb41-27bc-4f3c-800f-219fe0ebba78 from datanode DatanodeRegistration(127.0.0.1:40861, datanodeUuid=5c34f5a4-4000-4250-9279-49f7e87bdcb0, infoPort=40347, infoSecurePort=0, ipcPort=42807, storageInfo=lv=-57;cid=testClusterID;nsid=307411234;c=1731436399474) 2024-11-12T18:33:20,119 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25758a35c8172040 with lease ID 0x647f550798f827c9: from storage DS-599edb41-27bc-4f3c-800f-219fe0ebba78 node DatanodeRegistration(127.0.0.1:40861, datanodeUuid=5c34f5a4-4000-4250-9279-49f7e87bdcb0, infoPort=40347, infoSecurePort=0, ipcPort=42807, storageInfo=lv=-57;cid=testClusterID;nsid=307411234;c=1731436399474), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:33:20,119 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25758a35c8172040 with lease ID 0x647f550798f827c9: Processing first storage report for DS-2b3b8036-b2ee-47e9-ba32-2e87a6635d6b from datanode DatanodeRegistration(127.0.0.1:40861, datanodeUuid=5c34f5a4-4000-4250-9279-49f7e87bdcb0, infoPort=40347, infoSecurePort=0, ipcPort=42807, storageInfo=lv=-57;cid=testClusterID;nsid=307411234;c=1731436399474) 2024-11-12T18:33:20,119 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25758a35c8172040 with lease ID 0x647f550798f827c9: from storage DS-2b3b8036-b2ee-47e9-ba32-2e87a6635d6b node DatanodeRegistration(127.0.0.1:40861, datanodeUuid=5c34f5a4-4000-4250-9279-49f7e87bdcb0, infoPort=40347, infoSecurePort=0, ipcPort=42807, storageInfo=lv=-57;cid=testClusterID;nsid=307411234;c=1731436399474), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T18:33:20,129 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1 2024-11-12T18:33:20,132 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/cluster_dd51cd91-0d24-fd5d-5373-e2bd48afe9fe/zookeeper_0, clientPort=63091, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/cluster_dd51cd91-0d24-fd5d-5373-e2bd48afe9fe/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/cluster_dd51cd91-0d24-fd5d-5373-e2bd48afe9fe/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-12T18:33:20,133 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63091 2024-11-12T18:33:20,133 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:33:20,134 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:33:20,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:33:20,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741825_1001 (size=7) 2024-11-12T18:33:20,143 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722 with version=8 2024-11-12T18:33:20,143 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36309/user/jenkins/test-data/826b45c4-4209-1bfd-3dbb-f8467f3dd70e/hbase-staging 2024-11-12T18:33:20,145 INFO [Time-limited test {}] client.ConnectionUtils(128): master/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:33:20,145 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:33:20,145 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:33:20,145 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:33:20,145 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:33:20,145 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:33:20,145 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-12T18:33:20,146 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:33:20,146 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41935 2024-11-12T18:33:20,147 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41935 connecting to ZooKeeper ensemble=127.0.0.1:63091 2024-11-12T18:33:20,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:419350x0, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:33:20,154 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41935-0x1003544d8a20000 connected 2024-11-12T18:33:20,165 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:33:20,166 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:33:20,168 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:33:20,168 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722, hbase.cluster.distributed=false 2024-11-12T18:33:20,170 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:33:20,170 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41935 2024-11-12T18:33:20,170 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41935 2024-11-12T18:33:20,170 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41935 2024-11-12T18:33:20,171 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41935 2024-11-12T18:33:20,171 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41935 2024-11-12T18:33:20,187 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9911683f163c:0 server-side Connection retries=45 2024-11-12T18:33:20,187 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:33:20,187 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T18:33:20,187 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T18:33:20,187 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T18:33:20,187 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T18:33:20,187 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T18:33:20,187 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T18:33:20,188 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45743 2024-11-12T18:33:20,189 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45743 connecting to ZooKeeper ensemble=127.0.0.1:63091 2024-11-12T18:33:20,189 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:33:20,191 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:33:20,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:457430x0, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T18:33:20,195 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45743-0x1003544d8a20001 connected 2024-11-12T18:33:20,195 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:33:20,196 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T18:33:20,196 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T18:33:20,197 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T18:33:20,198 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T18:33:20,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45743 2024-11-12T18:33:20,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45743 2024-11-12T18:33:20,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45743 2024-11-12T18:33:20,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45743 2024-11-12T18:33:20,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45743 
2024-11-12T18:33:20,211 DEBUG [M:0;9911683f163c:41935 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9911683f163c:41935 2024-11-12T18:33:20,211 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/9911683f163c,41935,1731436400145 2024-11-12T18:33:20,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:33:20,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:33:20,214 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9911683f163c,41935,1731436400145 2024-11-12T18:33:20,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T18:33:20,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:20,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:20,216 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T18:33:20,216 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9911683f163c,41935,1731436400145 from backup master directory 2024-11-12T18:33:20,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9911683f163c,41935,1731436400145 2024-11-12T18:33:20,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:33:20,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T18:33:20,217 WARN [master/9911683f163c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-12T18:33:20,217 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9911683f163c,41935,1731436400145 2024-11-12T18:33:20,221 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/hbase.id] with ID: b9d72334-7bbf-408f-8986-2c6e34bb93ee 2024-11-12T18:33:20,221 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/.tmp/hbase.id 2024-11-12T18:33:20,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:33:20,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741826_1002 (size=42) 2024-11-12T18:33:20,226 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/.tmp/hbase.id]:[hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/hbase.id] 2024-11-12T18:33:20,236 INFO [master/9911683f163c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:33:20,236 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-12T18:33:20,238 INFO [master/9911683f163c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-12T18:33:20,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:20,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:20,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:33:20,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741827_1003 (size=196) 2024-11-12T18:33:20,246 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T18:33:20,247 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T18:33:20,247 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:33:20,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:20,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:20,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:33:20,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741828_1004 (size=1189) 2024-11-12T18:33:20,254 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store 2024-11-12T18:33:20,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:33:20,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741829_1005 (size=34) 2024-11-12T18:33:20,261 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:33:20,261 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:33:20,261 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:33:20,261 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:33:20,261 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:33:20,261 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:33:20,261 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:33:20,261 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436400261Disabling compacts and flushes for region at 1731436400261Disabling writes for close at 1731436400261Writing region close event to WAL at 1731436400261Closed at 1731436400261 2024-11-12T18:33:20,262 WARN [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/.initializing 2024-11-12T18:33:20,262 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/WALs/9911683f163c,41935,1731436400145 2024-11-12T18:33:20,264 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C41935%2C1731436400145, suffix=, logDir=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/WALs/9911683f163c,41935,1731436400145, archiveDir=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/oldWALs, maxLogs=10 2024-11-12T18:33:20,264 INFO [master/9911683f163c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C41935%2C1731436400145.1731436400264 2024-11-12T18:33:20,269 INFO [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/WALs/9911683f163c,41935,1731436400145/9911683f163c%2C41935%2C1731436400145.1731436400264 2024-11-12T18:33:20,269 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35545:35545),(127.0.0.1/127.0.0.1:40347:40347)] 2024-11-12T18:33:20,273 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:33:20,273 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:33:20,273 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:33:20,273 DEBUG 
[master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:33:20,274 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:33:20,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T18:33:20,275 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:33:20,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:33:20,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:33:20,277 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T18:33:20,277 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:33:20,277 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
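[editor's note] The HStore/CompactionConfiguration lines above echo the column-family attributes of 'master:store' (versions, block encoding, bloom type, in-memory flag, block size). A hedged sketch of how an equivalent family layout would be declared with the public client API; the table name "demo_store" is made up, the attribute values are taken from the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static TableDescriptor masterStoreLike() {
    // Mirrors the 'info' family: 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom,
    // in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    // Mirrors the 'proc' family: 1 version, ROW bloom, 64 KB blocks.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}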
2024-11-12T18:33:20,278 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:33:20,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T18:33:20,279 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:33:20,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:33:20,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:33:20,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T18:33:20,280 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:33:20,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T18:33:20,280 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 
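[editor's note] The CompactionConfiguration lines above report the store-compaction parameters in effect (min 3 / max 10 files, ratio 1.2, off-peak ratio 5.0). A small sketch of the configuration keys that are assumed to back those values; the key names are the standard hbase.hstore.compaction.* properties and should be treated as an assumption rather than taken from the log itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration tuned() {
    // Illustrative only; the values below simply restate the defaults shown in the log.
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);      // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);     // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    return conf;
  }
}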
2024-11-12T18:33:20,281 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:33:20,281 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:33:20,282 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:33:20,282 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:33:20,283 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T18:33:20,284 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T18:33:20,286 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:33:20,286 INFO [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=707768, jitterRate=-0.10002705454826355}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T18:33:20,287 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731436400273Initializing all the Stores at 1731436400274 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436400274Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436400274Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436400274Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436400274Cleaning up temporary data from old regions at 1731436400282 (+8 ms)Region opened successfully at 1731436400286 (+4 ms) 2024-11-12T18:33:20,287 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T18:33:20,291 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74a86061, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:33:20,292 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-12T18:33:20,292 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T18:33:20,292 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T18:33:20,292 INFO [master/9911683f163c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T18:33:20,293 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-12T18:33:20,293 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-12T18:33:20,293 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T18:33:20,295 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
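[editor's note] The FlushLargeStoresPolicy line above falls back to "memstore flush size / number of families" because the table sets no per-family lower bound. A tiny worked example of that arithmetic, using the numbers visible in the log (flushSize=134217728 and the four families info, proc, rs, state):

public class PerFamilyFlushBoundSketch {
  public static void main(String[] args) {
    long memstoreFlushSize = 134_217_728L; // flushSize from the log (128 MB)
    int families = 4;                      // info, proc, rs, state
    // 134217728 / 4 = 33554432 bytes = 32 MB, matching flushSizeLowerBound=33554432 above.
    System.out.println(memstoreFlushSize / families);
  }
}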
2024-11-12T18:33:20,296 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T18:33:20,299 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-12T18:33:20,299 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T18:33:20,300 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T18:33:20,301 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-12T18:33:20,301 INFO [master/9911683f163c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T18:33:20,302 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T18:33:20,303 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-12T18:33:20,304 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T18:33:20,305 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T18:33:20,307 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T18:33:20,308 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T18:33:20,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:33:20,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T18:33:20,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:20,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-12T18:33:20,310 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=9911683f163c,41935,1731436400145, sessionid=0x1003544d8a20000, setting cluster-up flag (Was=false) 2024-11-12T18:33:20,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:20,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:20,317 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T18:33:20,318 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,41935,1731436400145 2024-11-12T18:33:20,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:20,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:20,326 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T18:33:20,327 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9911683f163c,41935,1731436400145 2024-11-12T18:33:20,329 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-12T18:33:20,330 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-12T18:33:20,330 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-12T18:33:20,330 INFO [master/9911683f163c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
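[editor's note] The ZKUtil/RecoverableZooKeeper lines above treat a missing znode such as /hbase/balancer as "not necessarily an error" and fall back to defaults. A minimal sketch of the same tolerate-absence probe with the plain ZooKeeper client; the quorum string and session timeout are placeholders, not the test's actual 127.0.0.1:63091 ensemble.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeProbeSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, (WatchedEvent e) -> { });
    try {
      byte[] data = zk.getData("/hbase/balancer", false, null);
      System.out.println("balancer switch znode: " + new String(data, "UTF-8"));
    } catch (KeeperException.NoNodeException e) {
      // Same situation as the DEBUG lines above: absence just means "use the default".
      System.out.println("/hbase/balancer not present, falling back to default");
    } finally {
      zk.close();
    }
  }
}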
2024-11-12T18:33:20,331 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9911683f163c,41935,1731436400145 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T18:33:20,332 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:33:20,332 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:33:20,332 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:33:20,332 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9911683f163c:0, corePoolSize=5, maxPoolSize=5 2024-11-12T18:33:20,332 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9911683f163c:0, corePoolSize=10, maxPoolSize=10 2024-11-12T18:33:20,332 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:33:20,332 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:33:20,332 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:33:20,333 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:33:20,334 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-12T18:33:20,334 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731436430334 2024-11-12T18:33:20,334 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T18:33:20,334 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T18:33:20,334 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T18:33:20,334 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T18:33:20,334 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T18:33:20,335 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T18:33:20,335 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:33:20,335 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,335 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T18:33:20,335 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T18:33:20,335 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T18:33:20,335 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T18:33:20,335 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T18:33:20,335 INFO [master/9911683f163c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T18:33:20,336 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436400336,5,FailOnTimeoutGroup] 2024-11-12T18:33:20,336 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436400336,5,FailOnTimeoutGroup] 2024-11-12T18:33:20,336 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,336 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-12T18:33:20,336 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,336 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741831_1007 (size=1321) 2024-11-12T18:33:20,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741831_1007 (size=1321) 2024-11-12T18:33:20,345 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-12T18:33:20,345 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722 2024-11-12T18:33:20,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:33:20,351 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741832_1008 (size=32) 2024-11-12T18:33:20,352 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:33:20,353 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:33:20,354 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:33:20,354 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:33:20,354 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:33:20,355 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:33:20,356 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:33:20,356 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:33:20,356 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:33:20,356 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:33:20,357 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:33:20,357 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:33:20,357 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:33:20,357 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:33:20,358 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:33:20,359 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:33:20,359 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:33:20,359 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:33:20,359 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/data/hbase/meta/1588230740 2024-11-12T18:33:20,360 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/data/hbase/meta/1588230740 2024-11-12T18:33:20,361 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:33:20,361 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:33:20,361 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T18:33:20,362 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:33:20,364 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T18:33:20,364 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734957, jitterRate=-0.0654541552066803}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T18:33:20,365 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731436400352Initializing all the Stores at 1731436400353 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436400353Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436400353Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436400353Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436400353Cleaning up temporary data from old regions at 1731436400361 (+8 ms)Region opened successfully at 1731436400365 (+4 ms) 2024-11-12T18:33:20,365 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:33:20,365 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:33:20,365 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:33:20,365 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:33:20,365 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:33:20,365 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:33:20,365 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436400365Disabling compacts and flushes for region at 1731436400365Disabling writes for close at 1731436400365Writing region close event to WAL at 1731436400365Closed at 1731436400365 2024-11-12T18:33:20,367 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:33:20,367 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-12T18:33:20,367 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-12T18:33:20,368 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:33:20,369 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-12T18:33:20,401 INFO [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer(746): ClusterId : b9d72334-7bbf-408f-8986-2c6e34bb93ee 2024-11-12T18:33:20,401 DEBUG [RS:0;9911683f163c:45743 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T18:33:20,403 DEBUG [RS:0;9911683f163c:45743 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T18:33:20,403 DEBUG [RS:0;9911683f163c:45743 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T18:33:20,405 DEBUG [RS:0;9911683f163c:45743 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T18:33:20,405 DEBUG [RS:0;9911683f163c:45743 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32373b3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9911683f163c/172.17.0.3:0 2024-11-12T18:33:20,418 DEBUG [RS:0;9911683f163c:45743 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9911683f163c:45743 2024-11-12T18:33:20,418 INFO [RS:0;9911683f163c:45743 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T18:33:20,418 INFO [RS:0;9911683f163c:45743 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T18:33:20,418 DEBUG [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer(832): About to register with Master. 
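[editor's note] Throughout this section servers are identified by the host,port,startcode triple (e.g. 9911683f163c,45743,1731436400186 in the RS:0 thread above). A small illustration of the client-side type that models that triple; the values are copied from the log, the snippet itself is only an example.

import org.apache.hadoop.hbase.ServerName;

public class ServerNameSketch {
  public static void main(String[] args) {
    ServerName rs = ServerName.valueOf("9911683f163c", 45743, 1731436400186L);
    System.out.println(rs);               // 9911683f163c,45743,1731436400186
    System.out.println(rs.getHostname()); // 9911683f163c
    System.out.println(rs.getPort());     // 45743
  }
}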
2024-11-12T18:33:20,419 INFO [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer(2659): reportForDuty to master=9911683f163c,41935,1731436400145 with port=45743, startcode=1731436400186 2024-11-12T18:33:20,419 DEBUG [RS:0;9911683f163c:45743 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T18:33:20,421 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55689, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T18:33:20,422 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41935 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9911683f163c,45743,1731436400186 2024-11-12T18:33:20,422 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41935 {}] master.ServerManager(517): Registering regionserver=9911683f163c,45743,1731436400186 2024-11-12T18:33:20,423 DEBUG [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722 2024-11-12T18:33:20,423 DEBUG [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42451 2024-11-12T18:33:20,423 DEBUG [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T18:33:20,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:33:20,425 DEBUG [RS:0;9911683f163c:45743 {}] zookeeper.ZKUtil(111): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9911683f163c,45743,1731436400186 2024-11-12T18:33:20,425 WARN [RS:0;9911683f163c:45743 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T18:33:20,425 INFO [RS:0;9911683f163c:45743 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:33:20,426 DEBUG [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/WALs/9911683f163c,45743,1731436400186 2024-11-12T18:33:20,426 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9911683f163c,45743,1731436400186] 2024-11-12T18:33:20,429 INFO [RS:0;9911683f163c:45743 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T18:33:20,430 INFO [RS:0;9911683f163c:45743 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T18:33:20,431 INFO [RS:0;9911683f163c:45743 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T18:33:20,431 INFO [RS:0;9911683f163c:45743 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
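[editor's note] The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. A back-of-the-envelope sketch of where such figures typically come from, assuming the usual defaults of 40% of heap for the upper limit and 95% of that for the lower mark, and an assumed ~2.2 GB heap; none of those fractions appear in the log itself.

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    long heapBytes = 2200L * 1024 * 1024;       // assumed heap size
    long upper = (long) (heapBytes * 0.40);     // assumed global memstore fraction
    long lower = (long) (upper * 0.95);         // assumed lower-limit fraction
    // Prints 880 and 836, matching the globalMemStoreLimit / LowMark values above.
    System.out.printf("globalMemStoreLimit=%d M, lowMark=%d M%n",
        upper / (1024 * 1024), lower / (1024 * 1024));
  }
}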
2024-11-12T18:33:20,431 INFO [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T18:33:20,432 INFO [RS:0;9911683f163c:45743 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T18:33:20,432 INFO [RS:0;9911683f163c:45743 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,432 DEBUG [RS:0;9911683f163c:45743 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:33:20,432 DEBUG [RS:0;9911683f163c:45743 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:33:20,432 DEBUG [RS:0;9911683f163c:45743 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:33:20,432 DEBUG [RS:0;9911683f163c:45743 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:33:20,432 DEBUG [RS:0;9911683f163c:45743 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:33:20,432 DEBUG [RS:0;9911683f163c:45743 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9911683f163c:0, corePoolSize=2, maxPoolSize=2 2024-11-12T18:33:20,432 DEBUG [RS:0;9911683f163c:45743 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:33:20,432 DEBUG [RS:0;9911683f163c:45743 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:33:20,432 DEBUG [RS:0;9911683f163c:45743 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:33:20,432 DEBUG [RS:0;9911683f163c:45743 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:33:20,432 DEBUG [RS:0;9911683f163c:45743 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:33:20,432 DEBUG [RS:0;9911683f163c:45743 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9911683f163c:0, corePoolSize=1, maxPoolSize=1 2024-11-12T18:33:20,432 DEBUG [RS:0;9911683f163c:45743 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:33:20,432 DEBUG [RS:0;9911683f163c:45743 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9911683f163c:0, corePoolSize=3, maxPoolSize=3 2024-11-12T18:33:20,436 INFO [RS:0;9911683f163c:45743 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-12T18:33:20,436 INFO [RS:0;9911683f163c:45743 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,436 INFO [RS:0;9911683f163c:45743 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,436 INFO [RS:0;9911683f163c:45743 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,436 INFO [RS:0;9911683f163c:45743 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,436 INFO [RS:0;9911683f163c:45743 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,45743,1731436400186-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:33:20,451 INFO [RS:0;9911683f163c:45743 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T18:33:20,452 INFO [RS:0;9911683f163c:45743 {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,45743,1731436400186-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,452 INFO [RS:0;9911683f163c:45743 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,452 INFO [RS:0;9911683f163c:45743 {}] regionserver.Replication(171): 9911683f163c,45743,1731436400186 started 2024-11-12T18:33:20,466 INFO [RS:0;9911683f163c:45743 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,466 INFO [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer(1482): Serving as 9911683f163c,45743,1731436400186, RpcServer on 9911683f163c/172.17.0.3:45743, sessionid=0x1003544d8a20001 2024-11-12T18:33:20,467 DEBUG [RS:0;9911683f163c:45743 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T18:33:20,467 DEBUG [RS:0;9911683f163c:45743 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9911683f163c,45743,1731436400186 2024-11-12T18:33:20,467 DEBUG [RS:0;9911683f163c:45743 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,45743,1731436400186' 2024-11-12T18:33:20,467 DEBUG [RS:0;9911683f163c:45743 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T18:33:20,467 DEBUG [RS:0;9911683f163c:45743 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T18:33:20,468 DEBUG [RS:0;9911683f163c:45743 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T18:33:20,468 DEBUG [RS:0;9911683f163c:45743 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T18:33:20,468 DEBUG [RS:0;9911683f163c:45743 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9911683f163c,45743,1731436400186 2024-11-12T18:33:20,468 DEBUG [RS:0;9911683f163c:45743 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9911683f163c,45743,1731436400186' 2024-11-12T18:33:20,468 DEBUG [RS:0;9911683f163c:45743 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T18:33:20,468 DEBUG 
[RS:0;9911683f163c:45743 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T18:33:20,469 DEBUG [RS:0;9911683f163c:45743 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T18:33:20,469 INFO [RS:0;9911683f163c:45743 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T18:33:20,469 INFO [RS:0;9911683f163c:45743 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T18:33:20,519 WARN [9911683f163c:41935 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-12T18:33:20,571 INFO [RS:0;9911683f163c:45743 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C45743%2C1731436400186, suffix=, logDir=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/WALs/9911683f163c,45743,1731436400186, archiveDir=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/oldWALs, maxLogs=32 2024-11-12T18:33:20,571 INFO [RS:0;9911683f163c:45743 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C45743%2C1731436400186.1731436400571 2024-11-12T18:33:20,577 INFO [RS:0;9911683f163c:45743 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/WALs/9911683f163c,45743,1731436400186/9911683f163c%2C45743%2C1731436400186.1731436400571 2024-11-12T18:33:20,578 DEBUG [RS:0;9911683f163c:45743 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40347:40347),(127.0.0.1/127.0.0.1:35545:35545)] 2024-11-12T18:33:20,769 DEBUG [9911683f163c:41935 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-12T18:33:20,770 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9911683f163c,45743,1731436400186 2024-11-12T18:33:20,771 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,45743,1731436400186, state=OPENING 2024-11-12T18:33:20,773 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-12T18:33:20,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:20,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:20,774 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T18:33:20,775 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:33:20,775 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:33:20,775 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,45743,1731436400186}] 2024-11-12T18:33:20,927 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T18:33:20,929 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34173, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T18:33:20,933 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-12T18:33:20,933 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:33:20,935 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9911683f163c%2C45743%2C1731436400186.meta, suffix=.meta, logDir=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/WALs/9911683f163c,45743,1731436400186, archiveDir=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/oldWALs, maxLogs=32 2024-11-12T18:33:20,935 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 9911683f163c%2C45743%2C1731436400186.meta.1731436400935.meta 2024-11-12T18:33:20,940 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/WALs/9911683f163c,45743,1731436400186/9911683f163c%2C45743%2C1731436400186.meta.1731436400935.meta 2024-11-12T18:33:20,943 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40347:40347),(127.0.0.1/127.0.0.1:35545:35545)] 2024-11-12T18:33:20,950 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-12T18:33:20,950 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-12T18:33:20,950 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-12T18:33:20,951 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-12T18:33:20,951 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-12T18:33:20,951 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T18:33:20,951 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-12T18:33:20,951 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-12T18:33:20,952 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T18:33:20,953 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T18:33:20,953 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:33:20,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:33:20,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T18:33:20,954 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T18:33:20,954 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:33:20,954 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:33:20,954 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T18:33:20,955 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T18:33:20,955 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:33:20,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T18:33:20,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T18:33:20,956 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T18:33:20,956 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T18:33:20,956 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
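The StoreOpener lines above list the cache, encoding and compaction settings the meta region's column families open with (ROW_INDEX_V1 data block encoding, ROWCOL bloom filters, in-memory caching, 8 KB blocks, as echoed in the open journal further down). A minimal sketch of expressing the same attributes on a user table through the public descriptor builders follows; the table name "t1" and family name "info" are illustrative placeholders, not something this test creates.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MetaLikeFamilySketch {
      // Mirrors the attributes printed in the region open journal below:
      // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL',
      // IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)'
      static TableDescriptor metaLikeTable() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8192)
                .build())
            .build();
      }
    }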
2024-11-12T18:33:20,956 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T18:33:20,957 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/data/hbase/meta/1588230740 2024-11-12T18:33:20,958 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/data/hbase/meta/1588230740 2024-11-12T18:33:20,959 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T18:33:20,959 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T18:33:20,959 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-12T18:33:20,960 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T18:33:20,961 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=877539, jitterRate=0.11584876477718353}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-12T18:33:20,961 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-12T18:33:20,961 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731436400951Writing region info on filesystem at 1731436400951Initializing all the Stores at 1731436400952 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436400952Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436400952Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731436400952Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731436400952Cleaning up temporary data from old regions at 1731436400959 (+7 ms)Running coprocessor post-open hooks at 1731436400961 (+2 ms)Region opened successfully at 1731436400961 2024-11-12T18:33:20,962 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731436400927 2024-11-12T18:33:20,965 DEBUG [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T18:33:20,965 INFO [RS_OPEN_META-regionserver/9911683f163c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-12T18:33:20,965 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=9911683f163c,45743,1731436400186 2024-11-12T18:33:20,966 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9911683f163c,45743,1731436400186, state=OPEN 2024-11-12T18:33:20,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:33:20,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T18:33:20,970 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=9911683f163c,45743,1731436400186 2024-11-12T18:33:20,970 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:33:20,970 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T18:33:20,973 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T18:33:20,973 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=9911683f163c,45743,1731436400186 in 195 msec 2024-11-12T18:33:20,975 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T18:33:20,975 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 606 msec 2024-11-12T18:33:20,975 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T18:33:20,975 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-12T18:33:20,977 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:33:20,977 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,45743,1731436400186, seqNum=-1] 2024-11-12T18:33:20,977 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:33:20,978 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42089, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:33:20,983 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 653 msec 2024-11-12T18:33:20,983 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731436400983, completionTime=-1 2024-11-12T18:33:20,983 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-12T18:33:20,983 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-12T18:33:20,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-12T18:33:20,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731436460985 2024-11-12T18:33:20,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731436520985 2024-11-12T18:33:20,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-12T18:33:20,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41935,1731436400145-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41935,1731436400145-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41935,1731436400145-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9911683f163c:41935, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T18:33:20,985 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,986 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-12T18:33:20,987 DEBUG [master/9911683f163c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-12T18:33:20,989 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.772sec 2024-11-12T18:33:20,989 INFO [master/9911683f163c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T18:33:20,989 INFO [master/9911683f163c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T18:33:20,989 INFO [master/9911683f163c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T18:33:20,989 INFO [master/9911683f163c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-12T18:33:20,989 INFO [master/9911683f163c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T18:33:20,990 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41935,1731436400145-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T18:33:20,990 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41935,1731436400145-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T18:33:20,992 DEBUG [master/9911683f163c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-12T18:33:20,992 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T18:33:20,992 INFO [master/9911683f163c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9911683f163c,41935,1731436400145-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
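InitMetaProcedure above reports creating the built-in 'default' and 'hbase' namespaces as part of master initialization. That step runs inside the master itself; for reference, a hedged sketch of creating an additional namespace through the public Admin API is shown below (the open connection and the name "ns1" are assumptions for illustration only).

    import java.io.IOException;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public final class NamespaceSketch {
      // 'connection' is an already-open cluster connection; "ns1" is a placeholder name.
      static void createNamespace(Connection connection) throws IOException {
        try (Admin admin = connection.getAdmin()) {
          admin.createNamespace(NamespaceDescriptor.create("ns1").build());
        }
      }
    }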
2024-11-12T18:33:21,001 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@352bc0e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:33:21,001 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 9911683f163c,41935,-1 for getting cluster id 2024-11-12T18:33:21,001 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-12T18:33:21,003 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b9d72334-7bbf-408f-8986-2c6e34bb93ee' 2024-11-12T18:33:21,003 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-12T18:33:21,003 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b9d72334-7bbf-408f-8986-2c6e34bb93ee" 2024-11-12T18:33:21,004 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f9e6ce8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:33:21,004 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9911683f163c,41935,-1] 2024-11-12T18:33:21,004 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-12T18:33:21,004 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:33:21,005 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59442, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-12T18:33:21,006 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33bada87, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T18:33:21,007 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T18:33:21,007 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9911683f163c,45743,1731436400186, seqNum=-1] 2024-11-12T18:33:21,008 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T18:33:21,009 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33724, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T18:33:21,010 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=9911683f163c,41935,1731436400145 2024-11-12T18:33:21,011 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T18:33:21,013 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-12T18:33:21,013 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T18:33:21,015 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/WALs/test.com,8080,1, archiveDir=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/oldWALs, maxLogs=32 2024-11-12T18:33:21,015 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731436401015 2024-11-12T18:33:21,020 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/WALs/test.com,8080,1/test.com%2C8080%2C1.1731436401015 2024-11-12T18:33:21,024 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35545:35545),(127.0.0.1/127.0.0.1:40347:40347)] 2024-11-12T18:33:21,024 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731436401024 2024-11-12T18:33:21,029 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,030 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,030 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,030 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,030 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,030 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/WALs/test.com,8080,1/test.com%2C8080%2C1.1731436401015 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/WALs/test.com,8080,1/test.com%2C8080%2C1.1731436401024 2024-11-12T18:33:21,033 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40347:40347),(127.0.0.1/127.0.0.1:35545:35545)] 2024-11-12T18:33:21,033 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/WALs/test.com,8080,1/test.com%2C8080%2C1.1731436401015 is not closed yet, will try archiving it next time 2024-11-12T18:33:21,033 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,034 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741835_1011 (size=93) 2024-11-12T18:33:21,034 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741835_1011 (size=93) 2024-11-12T18:33:21,034 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,034 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741836_1012 (size=93) 
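The WAL configuration line above reports blocksize=256 MB, rollsize=128 MB and maxLogs=32 before the test rolls test.com%2C8080%2C1.1731436401015 over to the .1731436401024 file. A short sketch of the configuration keys behind those numbers follows (rollsize is blocksize times the roll multiplier); the values simply restate what the log prints, and Admin.rollWALWriter is included only as one public way to request a roll, not as the mechanism this test uses.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class WalRollSketch {
      static Configuration walRollConf() {
        Configuration conf = HBaseConfiguration.create();
        // 256 MB block size with the 0.5 roll multiplier yields the 128 MB rollsize above.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }

      // One public way to ask a specific region server to roll its WAL.
      static void requestRoll(Admin admin, ServerName serverName) throws IOException {
        admin.rollWALWriter(serverName);
      }
    }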
2024-11-12T18:33:21,036 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/WALs/test.com,8080,1/test.com%2C8080%2C1.1731436401015 to hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/oldWALs/test.com%2C8080%2C1.1731436401015 2024-11-12T18:33:21,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741836_1012 (size=93) 2024-11-12T18:33:21,038 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/oldWALs 2024-11-12T18:33:21,038 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731436401024) 2024-11-12T18:33:21,038 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-12T18:33:21,038 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T18:33:21,038 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:33:21,038 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:33:21,039 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:33:21,039 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-12T18:33:21,039 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-12T18:33:21,039 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1337801983, stopped=false 2024-11-12T18:33:21,039 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=9911683f163c,41935,1731436400145 2024-11-12T18:33:21,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:33:21,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T18:33:21,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:21,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:21,040 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:33:21,041 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-12T18:33:21,041 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:33:21,041 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:33:21,041 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '9911683f163c,45743,1731436400186' ***** 2024-11-12T18:33:21,041 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T18:33:21,041 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:33:21,041 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T18:33:21,041 INFO [RS:0;9911683f163c:45743 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T18:33:21,042 INFO [RS:0;9911683f163c:45743 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T18:33:21,042 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T18:33:21,042 INFO [RS:0;9911683f163c:45743 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T18:33:21,042 INFO [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer(959): stopping server 9911683f163c,45743,1731436400186 2024-11-12T18:33:21,042 INFO [RS:0;9911683f163c:45743 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:33:21,042 INFO [RS:0;9911683f163c:45743 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;9911683f163c:45743. 2024-11-12T18:33:21,042 DEBUG [RS:0;9911683f163c:45743 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T18:33:21,042 DEBUG [RS:0;9911683f163c:45743 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:33:21,042 INFO [RS:0;9911683f163c:45743 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T18:33:21,042 INFO [RS:0;9911683f163c:45743 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T18:33:21,042 INFO [RS:0;9911683f163c:45743 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-12T18:33:21,042 INFO [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-12T18:33:21,042 INFO [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T18:33:21,042 DEBUG [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-12T18:33:21,042 DEBUG [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-12T18:33:21,043 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T18:33:21,043 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T18:33:21,043 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T18:33:21,043 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T18:33:21,043 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T18:33:21,043 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-12T18:33:21,059 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/data/hbase/meta/1588230740/.tmp/ns/d44e4a33a0f142d98188e59695d18fd2 is 43, key is default/ns:d/1731436400979/Put/seqid=0 2024-11-12T18:33:21,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741837_1013 (size=5153) 2024-11-12T18:33:21,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741837_1013 (size=5153) 2024-11-12T18:33:21,065 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/data/hbase/meta/1588230740/.tmp/ns/d44e4a33a0f142d98188e59695d18fd2 2024-11-12T18:33:21,070 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/data/hbase/meta/1588230740/.tmp/ns/d44e4a33a0f142d98188e59695d18fd2 as hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/data/hbase/meta/1588230740/ns/d44e4a33a0f142d98188e59695d18fd2 2024-11-12T18:33:21,075 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/data/hbase/meta/1588230740/ns/d44e4a33a0f142d98188e59695d18fd2, entries=2, sequenceid=6, filesize=5.0 K 2024-11-12T18:33:21,077 INFO 
[RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 34ms, sequenceid=6, compaction requested=false 2024-11-12T18:33:21,081 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-12T18:33:21,081 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T18:33:21,081 INFO [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T18:33:21,082 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731436401043Running coprocessor pre-close hooks at 1731436401043Disabling compacts and flushes for region at 1731436401043Disabling writes for close at 1731436401043Obtaining lock to block concurrent updates at 1731436401043Preparing flush snapshotting stores in 1588230740 at 1731436401043Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731436401043Flushing stores of hbase:meta,,1.1588230740 at 1731436401044 (+1 ms)Flushing 1588230740/ns: creating writer at 1731436401044Flushing 1588230740/ns: appending metadata at 1731436401058 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731436401058Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65220df8: reopening flushed file at 1731436401070 (+12 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 34ms, sequenceid=6, compaction requested=false at 1731436401077 (+7 ms)Writing region close event to WAL at 1731436401077Running coprocessor post-close hooks at 1731436401081 (+4 ms)Closed at 1731436401081 2024-11-12T18:33:21,082 DEBUG [RS_CLOSE_META-regionserver/9911683f163c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-12T18:33:21,243 INFO [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer(976): stopping server 9911683f163c,45743,1731436400186; all regions closed. 
2024-11-12T18:33:21,243 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,243 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,243 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,244 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,244 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741834_1010 (size=1152) 2024-11-12T18:33:21,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741834_1010 (size=1152) 2024-11-12T18:33:21,248 DEBUG [RS:0;9911683f163c:45743 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/oldWALs 2024-11-12T18:33:21,248 INFO [RS:0;9911683f163c:45743 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C45743%2C1731436400186.meta:.meta(num 1731436400935) 2024-11-12T18:33:21,248 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,248 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,248 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,248 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,249 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741833_1009 (size=93) 2024-11-12T18:33:21,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741833_1009 (size=93) 2024-11-12T18:33:21,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,44807,1731436210143/9911683f163c%2C44807%2C1731436210143.1731436210337 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-12T18:33:21,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37157/user/jenkins/test-data/46d6dd51-fda0-7bfa-7734-cf6e88e73ef1/WALs/9911683f163c,33915,1731436209168/9911683f163c%2C33915%2C1731436209168.meta.1731436210021.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-12T18:33:21,253 DEBUG [RS:0;9911683f163c:45743 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/oldWALs 2024-11-12T18:33:21,253 INFO [RS:0;9911683f163c:45743 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9911683f163c%2C45743%2C1731436400186:(num 1731436400571) 2024-11-12T18:33:21,253 DEBUG [RS:0;9911683f163c:45743 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T18:33:21,253 INFO [RS:0;9911683f163c:45743 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T18:33:21,253 INFO [RS:0;9911683f163c:45743 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:33:21,253 INFO [RS:0;9911683f163c:45743 {}] hbase.ChoreService(370): Chore service for: regionserver/9911683f163c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-12T18:33:21,253 INFO [RS:0;9911683f163c:45743 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:33:21,253 INFO [regionserver/9911683f163c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T18:33:21,253 INFO [RS:0;9911683f163c:45743 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45743 2024-11-12T18:33:21,255 INFO [RS:0;9911683f163c:45743 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:33:21,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T18:33:21,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9911683f163c,45743,1731436400186 2024-11-12T18:33:21,257 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9911683f163c,45743,1731436400186] 2024-11-12T18:33:21,258 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9911683f163c,45743,1731436400186 already deleted, retry=false 2024-11-12T18:33:21,258 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9911683f163c,45743,1731436400186 expired; onlineServers=0 2024-11-12T18:33:21,258 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '9911683f163c,41935,1731436400145' ***** 2024-11-12T18:33:21,258 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T18:33:21,258 INFO [M:0;9911683f163c:41935 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T18:33:21,258 INFO [M:0;9911683f163c:41935 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T18:33:21,258 DEBUG [M:0;9911683f163c:41935 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T18:33:21,258 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-12T18:33:21,258 DEBUG [M:0;9911683f163c:41935 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T18:33:21,258 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436400336 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.large.0-1731436400336,5,FailOnTimeoutGroup] 2024-11-12T18:33:21,258 DEBUG [master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436400336 {}] cleaner.HFileCleaner(306): Exit Thread[master/9911683f163c:0:becomeActiveMaster-HFileCleaner.small.0-1731436400336,5,FailOnTimeoutGroup] 2024-11-12T18:33:21,258 INFO [M:0;9911683f163c:41935 {}] hbase.ChoreService(370): Chore service for: master/9911683f163c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-12T18:33:21,258 INFO [M:0;9911683f163c:41935 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T18:33:21,258 DEBUG [M:0;9911683f163c:41935 {}] master.HMaster(1795): Stopping service threads 2024-11-12T18:33:21,258 INFO [M:0;9911683f163c:41935 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T18:33:21,258 INFO [M:0;9911683f163c:41935 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T18:33:21,259 INFO [M:0;9911683f163c:41935 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T18:33:21,259 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-12T18:33:21,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T18:33:21,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T18:33:21,260 DEBUG [M:0;9911683f163c:41935 {}] zookeeper.ZKUtil(347): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-12T18:33:21,260 WARN [M:0;9911683f163c:41935 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-12T18:33:21,260 INFO [M:0;9911683f163c:41935 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/.lastflushedseqids 2024-11-12T18:33:21,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741838_1014 (size=99) 2024-11-12T18:33:21,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741838_1014 (size=99) 2024-11-12T18:33:21,265 INFO [M:0;9911683f163c:41935 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-12T18:33:21,265 INFO [M:0;9911683f163c:41935 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-12T18:33:21,266 DEBUG [M:0;9911683f163c:41935 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T18:33:21,266 INFO [M:0;9911683f163c:41935 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:33:21,266 DEBUG [M:0;9911683f163c:41935 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:33:21,266 DEBUG [M:0;9911683f163c:41935 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T18:33:21,266 DEBUG [M:0;9911683f163c:41935 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T18:33:21,266 INFO [M:0;9911683f163c:41935 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-12T18:33:21,282 DEBUG [M:0;9911683f163c:41935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/008395a4e4d5450bb3d6226c2209772c is 82, key is hbase:meta,,1/info:regioninfo/1731436400965/Put/seqid=0 2024-11-12T18:33:21,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741839_1015 (size=5672) 2024-11-12T18:33:21,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741839_1015 (size=5672) 2024-11-12T18:33:21,287 INFO [M:0;9911683f163c:41935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/008395a4e4d5450bb3d6226c2209772c 2024-11-12T18:33:21,306 DEBUG [M:0;9911683f163c:41935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9e6de1cc90d24c6ca3d5801aa0acbd5a is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731436400982/Put/seqid=0 2024-11-12T18:33:21,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741840_1016 (size=5275) 2024-11-12T18:33:21,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741840_1016 (size=5275) 2024-11-12T18:33:21,312 INFO [M:0;9911683f163c:41935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9e6de1cc90d24c6ca3d5801aa0acbd5a 2024-11-12T18:33:21,331 DEBUG [M:0;9911683f163c:41935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/12e1b6665eb4472bbcb2dfd72980ee45 is 69, key is 9911683f163c,45743,1731436400186/rs:state/1731436400422/Put/seqid=0 2024-11-12T18:33:21,336 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741841_1017 (size=5156) 2024-11-12T18:33:21,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741841_1017 (size=5156) 2024-11-12T18:33:21,337 INFO [M:0;9911683f163c:41935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/12e1b6665eb4472bbcb2dfd72980ee45 2024-11-12T18:33:21,357 INFO [RS:0;9911683f163c:45743 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:33:21,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:33:21,357 INFO [RS:0;9911683f163c:45743 {}] regionserver.HRegionServer(1031): Exiting; stopping=9911683f163c,45743,1731436400186; zookeeper connection closed. 2024-11-12T18:33:21,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45743-0x1003544d8a20001, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:33:21,357 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@483efe8e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@483efe8e 2024-11-12T18:33:21,357 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-12T18:33:21,364 DEBUG [M:0;9911683f163c:41935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ea2f25032168454d8d2c1307dfbc3f16 is 52, key is load_balancer_on/state:d/1731436401012/Put/seqid=0 2024-11-12T18:33:21,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741842_1018 (size=5056) 2024-11-12T18:33:21,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741842_1018 (size=5056) 2024-11-12T18:33:21,369 INFO [M:0;9911683f163c:41935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ea2f25032168454d8d2c1307dfbc3f16 2024-11-12T18:33:21,374 DEBUG [M:0;9911683f163c:41935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/008395a4e4d5450bb3d6226c2209772c as hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/008395a4e4d5450bb3d6226c2209772c 2024-11-12T18:33:21,378 INFO [M:0;9911683f163c:41935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/008395a4e4d5450bb3d6226c2209772c, entries=8, sequenceid=29, filesize=5.5 K 2024-11-12T18:33:21,379 DEBUG [M:0;9911683f163c:41935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9e6de1cc90d24c6ca3d5801aa0acbd5a as hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9e6de1cc90d24c6ca3d5801aa0acbd5a 2024-11-12T18:33:21,384 INFO [M:0;9911683f163c:41935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9e6de1cc90d24c6ca3d5801aa0acbd5a, entries=3, sequenceid=29, filesize=5.2 K 2024-11-12T18:33:21,385 DEBUG [M:0;9911683f163c:41935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/12e1b6665eb4472bbcb2dfd72980ee45 as hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/12e1b6665eb4472bbcb2dfd72980ee45 2024-11-12T18:33:21,389 INFO [M:0;9911683f163c:41935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/12e1b6665eb4472bbcb2dfd72980ee45, entries=1, sequenceid=29, filesize=5.0 K 2024-11-12T18:33:21,389 DEBUG [M:0;9911683f163c:41935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ea2f25032168454d8d2c1307dfbc3f16 as hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ea2f25032168454d8d2c1307dfbc3f16 2024-11-12T18:33:21,393 INFO [M:0;9911683f163c:41935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42451/user/jenkins/test-data/6c436c47-d845-efd4-d969-0281961a5722/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ea2f25032168454d8d2c1307dfbc3f16, entries=1, sequenceid=29, filesize=4.9 K 2024-11-12T18:33:21,394 INFO [M:0;9911683f163c:41935 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=29, compaction requested=false 2024-11-12T18:33:21,396 INFO [M:0;9911683f163c:41935 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-12T18:33:21,396 DEBUG [M:0;9911683f163c:41935 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731436401266Disabling compacts and flushes for region at 1731436401266Disabling writes for close at 1731436401266Obtaining lock to block concurrent updates at 1731436401266Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731436401266Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731436401266Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731436401267 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731436401267Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731436401281 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731436401281Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731436401291 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731436401306 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731436401306Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731436401316 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731436401331 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731436401331Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731436401341 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731436401363 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731436401363Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a14a2b0: reopening flushed file at 1731436401373 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a05a060: reopening flushed file at 1731436401378 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9a9e8a9: reopening flushed file at 1731436401384 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1afe0d44: reopening flushed file at 1731436401389 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=29, compaction requested=false at 1731436401394 (+5 ms)Writing region close event to WAL at 1731436401396 (+2 ms)Closed at 1731436401396 2024-11-12T18:33:21,396 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,396 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,396 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,396 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,396 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T18:33:21,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40861 is added to blk_1073741830_1006 (size=10311) 2024-11-12T18:33:21,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39169 is added to blk_1073741830_1006 (size=10311) 2024-11-12T18:33:21,399 INFO [M:0;9911683f163c:41935 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-12T18:33:21,399 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T18:33:21,399 INFO [M:0;9911683f163c:41935 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41935 2024-11-12T18:33:21,399 INFO [M:0;9911683f163c:41935 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T18:33:21,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:33:21,501 INFO [M:0;9911683f163c:41935 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T18:33:21,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41935-0x1003544d8a20000, quorum=127.0.0.1:63091, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T18:33:21,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@50573d5f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:33:21,504 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@374ad87b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:33:21,504 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:33:21,504 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e50120a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:33:21,504 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7923b539{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/hadoop.log.dir/,STOPPED} 2024-11-12T18:33:21,505 WARN [BP-211007062-172.17.0.3-1731436399474 heartbeating to localhost/127.0.0.1:42451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:33:21,505 WARN [BP-211007062-172.17.0.3-1731436399474 heartbeating to localhost/127.0.0.1:42451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-211007062-172.17.0.3-1731436399474 (Datanode Uuid 5c34f5a4-4000-4250-9279-49f7e87bdcb0) service to localhost/127.0.0.1:42451 2024-11-12T18:33:21,505 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T18:33:21,505 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:33:21,506 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/cluster_dd51cd91-0d24-fd5d-5373-e2bd48afe9fe/data/data3/current/BP-211007062-172.17.0.3-1731436399474 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:33:21,506 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/cluster_dd51cd91-0d24-fd5d-5373-e2bd48afe9fe/data/data4/current/BP-211007062-172.17.0.3-1731436399474 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:33:21,506 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:33:21,508 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5eb115e2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T18:33:21,509 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3faa96fd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:33:21,509 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:33:21,509 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@470bba0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:33:21,509 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@382146a9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/hadoop.log.dir/,STOPPED} 2024-11-12T18:33:21,510 WARN [BP-211007062-172.17.0.3-1731436399474 heartbeating to localhost/127.0.0.1:42451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T18:33:21,510 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T18:33:21,510 WARN [BP-211007062-172.17.0.3-1731436399474 heartbeating to localhost/127.0.0.1:42451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-211007062-172.17.0.3-1731436399474 (Datanode Uuid 8a5e2625-b93a-4d0c-ba5f-909f155ce126) service to localhost/127.0.0.1:42451 2024-11-12T18:33:21,510 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T18:33:21,510 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/cluster_dd51cd91-0d24-fd5d-5373-e2bd48afe9fe/data/data1/current/BP-211007062-172.17.0.3-1731436399474 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:33:21,511 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/cluster_dd51cd91-0d24-fd5d-5373-e2bd48afe9fe/data/data2/current/BP-211007062-172.17.0.3-1731436399474 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T18:33:21,511 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T18:33:21,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7667b0c0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T18:33:21,517 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@319a0a77{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T18:33:21,517 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T18:33:21,517 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e982213{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T18:33:21,517 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b67b6af{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/34556304-8d66-f54a-ba8b-d794ca9d77d1/hadoop.log.dir/,STOPPED} 2024-11-12T18:33:21,523 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-12T18:33:21,538 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-12T18:33:21,547 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 230) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42451 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:42451 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:42451 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:42451 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42451 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42451 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42451 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (122747033) connection to localhost/127.0.0.1:42451 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=538 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=56 (was 44) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6272 (was 6291)