2024-11-10 06:29:10,974 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-10 06:29:10,986 main DEBUG Took 0.010501 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-10 06:29:10,986 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-10 06:29:10,987 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-10 06:29:10,988 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-10 06:29:10,989 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 06:29:10,996 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-10 06:29:11,007 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 06:29:11,008 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 06:29:11,009 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 06:29:11,009 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 06:29:11,010 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 06:29:11,010 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 06:29:11,011 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 06:29:11,011 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 06:29:11,012 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 06:29:11,012 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 06:29:11,013 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 06:29:11,013 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 06:29:11,013 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 06:29:11,014 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-10 06:29:11,014 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 06:29:11,014 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 06:29:11,015 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 06:29:11,015 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 06:29:11,015 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 06:29:11,016 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 06:29:11,016 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 06:29:11,016 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 06:29:11,017 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 06:29:11,017 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 06:29:11,018 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 06:29:11,018 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-10 06:29:11,019 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 06:29:11,020 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-10 06:29:11,022 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-10 06:29:11,022 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-10 06:29:11,023 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-10 06:29:11,024 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-10 06:29:11,031 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-10 06:29:11,034 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-10 06:29:11,035 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-10 06:29:11,036 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-10 06:29:11,036 main DEBUG createAppenders(={Console}) 2024-11-10 06:29:11,037 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-10 06:29:11,037 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-10 06:29:11,037 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-10 06:29:11,038 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-10 06:29:11,038 main DEBUG OutputStream closed 2024-11-10 06:29:11,038 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-10 06:29:11,038 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-10 06:29:11,038 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-10 06:29:11,105 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-10 06:29:11,107 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-10 06:29:11,108 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-10 06:29:11,109 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-10 06:29:11,109 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-10 06:29:11,109 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-10 06:29:11,110 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-10 06:29:11,110 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-10 06:29:11,110 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-10 06:29:11,110 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-10 06:29:11,111 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-10 06:29:11,111 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-10 06:29:11,111 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-10 06:29:11,112 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-10 06:29:11,112 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-10 06:29:11,112 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-10 06:29:11,112 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-10 06:29:11,113 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-10 06:29:11,115 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-10 06:29:11,116 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-10 06:29:11,116 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-10 06:29:11,117 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-10T06:29:11,354 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969 2024-11-10 06:29:11,357 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-10 06:29:11,358 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
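The DEBUG entries above trace Log4j2 2.17.2 loading the test-scoped log4j2.properties bundled in hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar. A minimal sketch of a properties file that would produce this logger and appender set follows; only the logger names, levels, appender target, maxSize, and layout pattern are taken from the log, while the property ids and overall file layout are assumptions.

```properties
# Hypothetical reconstruction of the test log4j2.properties loaded above.
# Logger names/levels, appender target/maxSize and the pattern come from the
# DEBUG output; property ids and layout are assumptions.
status = debug   # assumed: the StatusLogger DEBUG lines above imply debug status logging

appender.console.type = HBaseTestAppender
appender.console.name = Console
appender.console.target = SYSTEM_ERR
appender.console.maxSize = 1G
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

rootLogger = INFO,Console

logger.hbase.name = org.apache.hadoop.hbase
logger.hbase.level = DEBUG
logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = WARN
logger.zookeeper.name = org.apache.zookeeper
logger.zookeeper.level = ERROR
logger.directory.name = org.apache.directory
logger.directory.level = WARN
logger.directory.additivity = false
logger.nettychannel.name = org.apache.hbase.thirdparty.io.netty.channel
logger.nettychannel.level = DEBUG
# ...remaining per-class loggers (MBeans, MetricsSinkAdapter, MetricsSystemImpl,
# FailedServers, MetricsConfig, ScheduledChore, RSRpcServices, TestJul2Slf4j)
# follow the same name/level pattern listed in the createLoggers(...) entry above.
```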
2024-11-10T06:29:11,367 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-10T06:29:11,401 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=5, ProcessCount=11, AvailableMemoryMB=8170 2024-11-10T06:29:11,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T06:29:11,418 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/cluster_7c3bcd10-8fd8-bc58-53c9-8bdeb6c9f462, deleteOnExit=true 2024-11-10T06:29:11,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-10T06:29:11,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/test.cache.data in system properties and HBase conf 2024-11-10T06:29:11,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T06:29:11,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/hadoop.log.dir in system properties and HBase conf 2024-11-10T06:29:11,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T06:29:11,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T06:29:11,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T06:29:11,508 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-10T06:29:11,592 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-10T06:29:11,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T06:29:11,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T06:29:11,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T06:29:11,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T06:29:11,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T06:29:11,600 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T06:29:11,600 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T06:29:11,600 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T06:29:11,601 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T06:29:11,601 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/nfs.dump.dir in system properties and HBase conf 2024-11-10T06:29:11,602 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/java.io.tmpdir in system properties and HBase conf 2024-11-10T06:29:11,602 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T06:29:11,602 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T06:29:11,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T06:29:12,089 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T06:29:12,429 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-10T06:29:12,512 INFO [Time-limited test {}] log.Log(170): Logging initialized @2212ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-10T06:29:12,593 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:29:12,660 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:29:12,683 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:29:12,683 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:29:12,685 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T06:29:12,699 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:29:12,701 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c5202f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:29:12,703 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae7f863{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:29:12,910 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c1a236c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/java.io.tmpdir/jetty-localhost-39363-hadoop-hdfs-3_4_1-tests_jar-_-any-15389476934477771792/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T06:29:12,917 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a625720{HTTP/1.1, (http/1.1)}{localhost:39363} 2024-11-10T06:29:12,917 INFO [Time-limited test {}] server.Server(415): Started @2618ms 2024-11-10T06:29:12,943 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T06:29:13,295 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:29:13,302 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:29:13,303 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:29:13,303 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:29:13,303 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T06:29:13,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@460757e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:29:13,305 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e911877{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:29:13,427 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b32dfc5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/java.io.tmpdir/jetty-localhost-35855-hadoop-hdfs-3_4_1-tests_jar-_-any-156388824880180349/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:29:13,428 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43ebd249{HTTP/1.1, (http/1.1)}{localhost:35855} 2024-11-10T06:29:13,428 INFO [Time-limited test {}] server.Server(415): Started @3129ms 2024-11-10T06:29:13,488 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T06:29:13,617 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:29:13,622 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:29:13,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:29:13,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:29:13,629 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T06:29:13,630 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@154eec55{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:29:13,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29058af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:29:13,766 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78ec6c63{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/java.io.tmpdir/jetty-localhost-33413-hadoop-hdfs-3_4_1-tests_jar-_-any-11567681664586289877/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:29:13,767 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@633713c3{HTTP/1.1, (http/1.1)}{localhost:33413} 2024-11-10T06:29:13,767 INFO [Time-limited test {}] server.Server(415): Started @3468ms 2024-11-10T06:29:13,770 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
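The entries above record the standard HBase mini-cluster bootstrap driven by HBaseTestingUtil: one master, one region server, two DataNodes and one ZooKeeper server, matching the StartMiniClusterOption printed earlier, plus the Jetty/HDFS endpoints it brings up. Below is a minimal sketch of the kind of test scaffold that produces such a bootstrap, assuming the HBaseClassTestRule, HBaseTestingUtil and StartMiniClusterOption APIs named in this log; the class name, builder method names, and the trivial test body are illustrative assumptions, not taken from TestLogRolling itself.

```java
import static org.junit.Assert.assertNotNull;

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

// Hypothetical scaffold mirroring the bootstrap in this log; not the actual
// TestLogRolling source.
@Category(LargeTests.class)
public class MiniClusterScaffoldSketch {

  // Enforces the per-class timeout reported above ("timeout: 13 mins").
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(MiniClusterScaffoldSketch.class);

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Matches StartMiniClusterOption{numMasters=1, numRegionServers=1,
    // numDataNodes=2, numZkServers=1, ...} printed by HBaseTestingUtil(805).
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    TEST_UTIL.startMiniCluster(option);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testClusterIsUp() throws Exception {
    // The real test methods (e.g. testSlowSyncLogRolling above) write enough
    // data to force WAL rolls; this placeholder only checks the cluster answers.
    assertNotNull(TEST_UTIL.getAdmin().listTableNames());
  }
}
```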
2024-11-10T06:29:13,961 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/cluster_7c3bcd10-8fd8-bc58-53c9-8bdeb6c9f462/data/data2/current/BP-965741819-172.17.0.2-1731220152180/current, will proceed with Du for space computation calculation, 2024-11-10T06:29:13,961 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/cluster_7c3bcd10-8fd8-bc58-53c9-8bdeb6c9f462/data/data4/current/BP-965741819-172.17.0.2-1731220152180/current, will proceed with Du for space computation calculation, 2024-11-10T06:29:13,961 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/cluster_7c3bcd10-8fd8-bc58-53c9-8bdeb6c9f462/data/data3/current/BP-965741819-172.17.0.2-1731220152180/current, will proceed with Du for space computation calculation, 2024-11-10T06:29:13,961 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/cluster_7c3bcd10-8fd8-bc58-53c9-8bdeb6c9f462/data/data1/current/BP-965741819-172.17.0.2-1731220152180/current, will proceed with Du for space computation calculation, 2024-11-10T06:29:14,022 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T06:29:14,022 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T06:29:14,106 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3b33ad018cfc8f4b with lease ID 0x5e5bd6c735b9729e: Processing first storage report for DS-129cc576-bc45-48ab-aa69-bea9b900eeb9 from datanode DatanodeRegistration(127.0.0.1:43049, datanodeUuid=41ec3bb4-9674-468e-9c95-a17c2fd23f96, infoPort=41335, infoSecurePort=0, ipcPort=32919, storageInfo=lv=-57;cid=testClusterID;nsid=1395167805;c=1731220152180) 2024-11-10T06:29:14,108 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3b33ad018cfc8f4b with lease ID 0x5e5bd6c735b9729e: from storage DS-129cc576-bc45-48ab-aa69-bea9b900eeb9 node DatanodeRegistration(127.0.0.1:43049, datanodeUuid=41ec3bb4-9674-468e-9c95-a17c2fd23f96, infoPort=41335, infoSecurePort=0, ipcPort=32919, storageInfo=lv=-57;cid=testClusterID;nsid=1395167805;c=1731220152180), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-10T06:29:14,108 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x10211da7660eb67c with lease ID 0x5e5bd6c735b9729d: Processing first storage report for DS-3fafae90-837c-4050-ac38-5df1b7d896e2 from datanode DatanodeRegistration(127.0.0.1:38839, datanodeUuid=550bacff-55a2-4337-a11d-699b6ff63ea4, infoPort=43059, infoSecurePort=0, ipcPort=36613, storageInfo=lv=-57;cid=testClusterID;nsid=1395167805;c=1731220152180) 2024-11-10T06:29:14,108 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x10211da7660eb67c with lease ID 0x5e5bd6c735b9729d: from storage DS-3fafae90-837c-4050-ac38-5df1b7d896e2 node DatanodeRegistration(127.0.0.1:38839, datanodeUuid=550bacff-55a2-4337-a11d-699b6ff63ea4, infoPort=43059, infoSecurePort=0, ipcPort=36613, storageInfo=lv=-57;cid=testClusterID;nsid=1395167805;c=1731220152180), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:29:14,109 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3b33ad018cfc8f4b with lease ID 0x5e5bd6c735b9729e: Processing first storage report for DS-21903f90-186a-47a1-a3db-d0c2686daa37 from datanode DatanodeRegistration(127.0.0.1:43049, datanodeUuid=41ec3bb4-9674-468e-9c95-a17c2fd23f96, infoPort=41335, infoSecurePort=0, ipcPort=32919, storageInfo=lv=-57;cid=testClusterID;nsid=1395167805;c=1731220152180) 2024-11-10T06:29:14,109 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3b33ad018cfc8f4b with lease ID 0x5e5bd6c735b9729e: from storage DS-21903f90-186a-47a1-a3db-d0c2686daa37 node DatanodeRegistration(127.0.0.1:43049, datanodeUuid=41ec3bb4-9674-468e-9c95-a17c2fd23f96, infoPort=41335, infoSecurePort=0, ipcPort=32919, storageInfo=lv=-57;cid=testClusterID;nsid=1395167805;c=1731220152180), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T06:29:14,109 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x10211da7660eb67c with lease ID 0x5e5bd6c735b9729d: Processing first storage report for DS-ead24c8c-a4df-4eb7-8de2-6daeb0ce6aa6 from datanode DatanodeRegistration(127.0.0.1:38839, datanodeUuid=550bacff-55a2-4337-a11d-699b6ff63ea4, infoPort=43059, infoSecurePort=0, ipcPort=36613, storageInfo=lv=-57;cid=testClusterID;nsid=1395167805;c=1731220152180) 2024-11-10T06:29:14,110 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x10211da7660eb67c with lease ID 0x5e5bd6c735b9729d: from storage DS-ead24c8c-a4df-4eb7-8de2-6daeb0ce6aa6 node DatanodeRegistration(127.0.0.1:38839, datanodeUuid=550bacff-55a2-4337-a11d-699b6ff63ea4, infoPort=43059, infoSecurePort=0, ipcPort=36613, storageInfo=lv=-57;cid=testClusterID;nsid=1395167805;c=1731220152180), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:29:14,184 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969 2024-11-10T06:29:14,264 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/cluster_7c3bcd10-8fd8-bc58-53c9-8bdeb6c9f462/zookeeper_0, clientPort=56126, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/cluster_7c3bcd10-8fd8-bc58-53c9-8bdeb6c9f462/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/cluster_7c3bcd10-8fd8-bc58-53c9-8bdeb6c9f462/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T06:29:14,273 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56126 2024-11-10T06:29:14,288 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:29:14,292 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:29:14,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741825_1001 (size=7) 2024-11-10T06:29:14,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741825_1001 (size=7) 2024-11-10T06:29:14,951 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857 with version=8 2024-11-10T06:29:14,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/hbase-staging 2024-11-10T06:29:15,042 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-10T06:29:15,289 INFO [Time-limited test {}] client.ConnectionUtils(128): master/4999977c7e1b:0 server-side Connection retries=45 2024-11-10T06:29:15,301 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:29:15,301 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T06:29:15,306 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T06:29:15,306 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:29:15,306 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T06:29:15,441 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T06:29:15,502 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-10T06:29:15,510 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-10T06:29:15,514 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T06:29:15,541 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 474 (auto-detected) 2024-11-10T06:29:15,542 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-10T06:29:15,560 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40045 2024-11-10T06:29:15,581 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40045 connecting to ZooKeeper ensemble=127.0.0.1:56126 2024-11-10T06:29:15,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:400450x0, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T06:29:15,618 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40045-0x10190de9cef0000 connected 2024-11-10T06:29:15,647 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:29:15,650 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:29:15,662 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:29:15,666 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857, hbase.cluster.distributed=false 2024-11-10T06:29:15,689 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T06:29:15,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40045 
2024-11-10T06:29:15,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40045 2024-11-10T06:29:15,694 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40045 2024-11-10T06:29:15,694 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40045 2024-11-10T06:29:15,695 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40045 2024-11-10T06:29:15,806 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4999977c7e1b:0 server-side Connection retries=45 2024-11-10T06:29:15,808 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:29:15,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T06:29:15,809 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T06:29:15,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:29:15,809 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T06:29:15,812 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T06:29:15,814 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T06:29:15,815 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43167 2024-11-10T06:29:15,817 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43167 connecting to ZooKeeper ensemble=127.0.0.1:56126 2024-11-10T06:29:15,818 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:29:15,822 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:29:15,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:431670x0, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T06:29:15,829 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:431670x0, quorum=127.0.0.1:56126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:29:15,830 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:43167-0x10190de9cef0001 connected 2024-11-10T06:29:15,833 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T06:29:15,841 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T06:29:15,843 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T06:29:15,848 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T06:29:15,849 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43167 2024-11-10T06:29:15,849 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43167 2024-11-10T06:29:15,850 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43167 2024-11-10T06:29:15,850 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43167 2024-11-10T06:29:15,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43167 2024-11-10T06:29:15,868 DEBUG [M:0;4999977c7e1b:40045 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;4999977c7e1b:40045 2024-11-10T06:29:15,869 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/4999977c7e1b,40045,1731220155094 2024-11-10T06:29:15,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:29:15,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:29:15,878 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/4999977c7e1b,40045,1731220155094 2024-11-10T06:29:15,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T06:29:15,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:29:15,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:29:15,903 DEBUG 
[master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T06:29:15,904 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/4999977c7e1b,40045,1731220155094 from backup master directory 2024-11-10T06:29:15,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/4999977c7e1b,40045,1731220155094 2024-11-10T06:29:15,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:29:15,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:29:15,908 WARN [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T06:29:15,908 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=4999977c7e1b,40045,1731220155094 2024-11-10T06:29:15,910 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-10T06:29:15,912 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-10T06:29:15,970 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/hbase.id] with ID: eac5a620-3c42-479a-a42c-aa8ee1df2bc0 2024-11-10T06:29:15,971 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/.tmp/hbase.id 2024-11-10T06:29:15,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741826_1002 (size=42) 2024-11-10T06:29:15,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741826_1002 (size=42) 2024-11-10T06:29:15,984 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/.tmp/hbase.id]:[hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/hbase.id] 2024-11-10T06:29:16,027 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:29:16,032 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-10T06:29:16,051 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-10T06:29:16,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:29:16,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:29:16,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741827_1003 (size=196) 2024-11-10T06:29:16,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741827_1003 (size=196) 2024-11-10T06:29:16,089 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T06:29:16,091 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T06:29:16,097 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:29:16,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741828_1004 (size=1189) 2024-11-10T06:29:16,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741828_1004 (size=1189) 2024-11-10T06:29:16,148 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store 2024-11-10T06:29:16,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741829_1005 (size=34) 2024-11-10T06:29:16,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741829_1005 (size=34) 2024-11-10T06:29:16,173 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-10T06:29:16,178 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:29:16,179 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T06:29:16,180 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:29:16,180 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:29:16,182 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T06:29:16,183 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:29:16,183 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T06:29:16,185 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731220156179Disabling compacts and flushes for region at 1731220156179Disabling writes for close at 1731220156182 (+3 ms)Writing region close event to WAL at 1731220156183 (+1 ms)Closed at 1731220156183 2024-11-10T06:29:16,187 WARN [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/.initializing 2024-11-10T06:29:16,187 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/WALs/4999977c7e1b,40045,1731220155094 2024-11-10T06:29:16,213 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C40045%2C1731220155094, suffix=, logDir=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/WALs/4999977c7e1b,40045,1731220155094, archiveDir=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/oldWALs, maxLogs=10 2024-11-10T06:29:16,226 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C40045%2C1731220155094.1731220156220 2024-11-10T06:29:16,250 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/WALs/4999977c7e1b,40045,1731220155094/4999977c7e1b%2C40045%2C1731220155094.1731220156220 2024-11-10T06:29:16,259 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41335:41335),(127.0.0.1/127.0.0.1:43059:43059)] 2024-11-10T06:29:16,260 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:29:16,260 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:29:16,263 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:29:16,264 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:29:16,301 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:29:16,330 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T06:29:16,334 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:29:16,337 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:29:16,337 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:29:16,340 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T06:29:16,341 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:29:16,342 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:29:16,342 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:29:16,345 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T06:29:16,345 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:29:16,346 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:29:16,346 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:29:16,349 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T06:29:16,349 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:29:16,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:29:16,350 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:29:16,353 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:29:16,355 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:29:16,359 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:29:16,360 DEBUG 
[master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:29:16,363 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T06:29:16,366 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:29:16,371 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:29:16,372 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878123, jitterRate=0.11659207940101624}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T06:29:16,378 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731220156276Initializing all the Stores at 1731220156278 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220156279 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220156279Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220156280 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220156280Cleaning up temporary data from old regions at 1731220156360 (+80 ms)Region opened successfully at 1731220156377 (+17 ms) 2024-11-10T06:29:16,379 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T06:29:16,415 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18ba1002, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4999977c7e1b/172.17.0.2:0 2024-11-10T06:29:16,447 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T06:29:16,458 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T06:29:16,458 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T06:29:16,462 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T06:29:16,463 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-10T06:29:16,468 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-10T06:29:16,468 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T06:29:16,501 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T06:29:16,512 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T06:29:16,516 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T06:29:16,518 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T06:29:16,520 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T06:29:16,522 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T06:29:16,524 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T06:29:16,527 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T06:29:16,529 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T06:29:16,531 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-11-10T06:29:16,532 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T06:29:16,549 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T06:29:16,550 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T06:29:16,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T06:29:16,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T06:29:16,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:29:16,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:29:16,557 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=4999977c7e1b,40045,1731220155094, sessionid=0x10190de9cef0000, setting cluster-up flag (Was=false) 2024-11-10T06:29:16,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:29:16,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:29:16,578 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T06:29:16,580 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4999977c7e1b,40045,1731220155094 2024-11-10T06:29:16,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:29:16,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:29:16,591 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T06:29:16,593 DEBUG 
[master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4999977c7e1b,40045,1731220155094 2024-11-10T06:29:16,599 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T06:29:16,656 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(746): ClusterId : eac5a620-3c42-479a-a42c-aa8ee1df2bc0 2024-11-10T06:29:16,659 DEBUG [RS:0;4999977c7e1b:43167 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T06:29:16,665 DEBUG [RS:0;4999977c7e1b:43167 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T06:29:16,665 DEBUG [RS:0;4999977c7e1b:43167 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T06:29:16,670 DEBUG [RS:0;4999977c7e1b:43167 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T06:29:16,670 DEBUG [RS:0;4999977c7e1b:43167 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7004477a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4999977c7e1b/172.17.0.2:0 2024-11-10T06:29:16,673 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T06:29:16,683 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T06:29:16,686 DEBUG [RS:0;4999977c7e1b:43167 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;4999977c7e1b:43167 2024-11-10T06:29:16,689 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T06:29:16,689 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T06:29:16,689 DEBUG [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-10T06:29:16,689 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
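Editor's note: the zookeeper.ZKUtil entries a little earlier ("Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)") amount to an existence check on a znode against the quorum at 127.0.0.1:56126. A rough sketch of the same check with the plain ZooKeeper client is below; the quorum address and znode path are copied from the log, while the session timeout and class name are illustrative assumptions.

    // Illustrative only: the ZKUtil entries above boil down to an existence
    // check on a znode; roughly equivalent raw ZooKeeper client usage.
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class BalancerZnodeCheck {
      public static void main(String[] args) throws Exception {
        // Quorum and path are taken from the log; the session timeout is arbitrary.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56126", 30000, event -> { });
        Stat stat = zk.exists("/hbase/balancer", false);
        System.out.println(stat == null
            ? "znode does not exist (not necessarily an error)"
            : "znode exists, version=" + stat.getVersion());
        zk.close();
      }
    }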
2024-11-10T06:29:16,692 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(2659): reportForDuty to master=4999977c7e1b,40045,1731220155094 with port=43167, startcode=1731220155767 2024-11-10T06:29:16,695 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 4999977c7e1b,40045,1731220155094 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T06:29:16,703 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:29:16,703 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:29:16,703 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:29:16,704 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:29:16,704 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/4999977c7e1b:0, corePoolSize=10, maxPoolSize=10 2024-11-10T06:29:16,704 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:29:16,704 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/4999977c7e1b:0, corePoolSize=2, maxPoolSize=2 2024-11-10T06:29:16,704 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:29:16,705 DEBUG [RS:0;4999977c7e1b:43167 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T06:29:16,706 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731220186705 2024-11-10T06:29:16,708 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T06:29:16,709 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T06:29:16,710 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:29:16,710 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T06:29:16,713 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T06:29:16,714 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T06:29:16,714 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T06:29:16,715 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T06:29:16,717 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:29:16,715 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:16,717 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T06:29:16,719 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T06:29:16,720 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T06:29:16,721 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T06:29:16,723 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T06:29:16,724 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T06:29:16,729 DEBUG 
[master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220156725,5,FailOnTimeoutGroup] 2024-11-10T06:29:16,730 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220156729,5,FailOnTimeoutGroup] 2024-11-10T06:29:16,730 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:16,730 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T06:29:16,731 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:16,732 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:16,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741831_1007 (size=1321) 2024-11-10T06:29:16,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741831_1007 (size=1321) 2024-11-10T06:29:16,736 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T06:29:16,737 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'}, regionDir=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857 2024-11-10T06:29:16,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741832_1008 (size=32) 2024-11-10T06:29:16,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741832_1008 (size=32) 2024-11-10T06:29:16,754 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:29:16,759 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T06:29:16,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T06:29:16,762 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:29:16,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:29:16,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T06:29:16,765 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T06:29:16,766 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:29:16,766 
INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:29:16,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T06:29:16,770 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T06:29:16,770 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:29:16,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:29:16,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T06:29:16,774 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T06:29:16,774 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:29:16,775 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:29:16,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T06:29:16,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered 
edits file(s) under hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740 2024-11-10T06:29:16,778 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740 2024-11-10T06:29:16,781 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T06:29:16,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T06:29:16,783 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T06:29:16,785 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T06:29:16,789 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57505, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T06:29:16,794 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:29:16,795 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=749460, jitterRate=-0.047012925148010254}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T06:29:16,797 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40045 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4999977c7e1b,43167,1731220155767 2024-11-10T06:29:16,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731220156754Initializing all the Stores at 1731220156756 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220156756Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220156756Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220156756Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220156756Cleaning up temporary data from old regions at 1731220156782 (+26 ms)Region opened successfully at 1731220156799 (+17 ms) 2024-11-10T06:29:16,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T06:29:16,800 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T06:29:16,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T06:29:16,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T06:29:16,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T06:29:16,801 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40045 {}] master.ServerManager(517): Registering regionserver=4999977c7e1b,43167,1731220155767 2024-11-10T06:29:16,803 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T06:29:16,803 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731220156799Disabling compacts and flushes for region at 1731220156799Disabling writes for close at 1731220156800 (+1 ms)Writing region close event to WAL at 1731220156802 (+2 ms)Closed at 1731220156803 (+1 ms) 2024-11-10T06:29:16,807 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:29:16,807 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T06:29:16,813 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T06:29:16,817 DEBUG [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857 2024-11-10T06:29:16,818 DEBUG [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37837 2024-11-10T06:29:16,818 DEBUG [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T06:29:16,821 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T06:29:16,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T06:29:16,823 DEBUG [RS:0;4999977c7e1b:43167 {}] zookeeper.ZKUtil(111): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4999977c7e1b,43167,1731220155767 2024-11-10T06:29:16,824 WARN [RS:0;4999977c7e1b:43167 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
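Editor's note: the ZNodeClearer warning just above means the region server checked its environment for HBASE_ZNODE_FILE and found nothing, so its ephemeral znode path will not be persisted for the start scripts to clean up after a crash. The real logic lives in org.apache.hadoop.hbase.ZNodeClearer and the hbase start scripts; the plain-Java restatement below is only a sketch of that check, with the class name and printed strings as assumptions.

    // Illustrative only: the gist of the ZNodeClearer warning above. The region
    // server looks for HBASE_ZNODE_FILE and, when it is absent, skips writing
    // its ephemeral znode path to disk.
    public class ZnodeFileCheck {
      public static void main(String[] args) {
        String znodeFile = System.getenv("HBASE_ZNODE_FILE");
        if (znodeFile == null || znodeFile.isEmpty()) {
          System.out.println("HBASE_ZNODE_FILE not set; znode path will not be persisted");
        } else {
          System.out.println("znode path would be persisted to " + znodeFile);
        }
      }
    }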
2024-11-10T06:29:16,824 INFO [RS:0;4999977c7e1b:43167 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:29:16,824 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T06:29:16,824 DEBUG [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767 2024-11-10T06:29:16,826 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4999977c7e1b,43167,1731220155767] 2024-11-10T06:29:16,849 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T06:29:16,864 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T06:29:16,869 INFO [RS:0;4999977c7e1b:43167 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T06:29:16,869 INFO [RS:0;4999977c7e1b:43167 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:16,870 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T06:29:16,876 INFO [RS:0;4999977c7e1b:43167 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T06:29:16,877 INFO [RS:0;4999977c7e1b:43167 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-10T06:29:16,878 DEBUG [RS:0;4999977c7e1b:43167 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:29:16,878 DEBUG [RS:0;4999977c7e1b:43167 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:29:16,878 DEBUG [RS:0;4999977c7e1b:43167 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:29:16,878 DEBUG [RS:0;4999977c7e1b:43167 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:29:16,879 DEBUG [RS:0;4999977c7e1b:43167 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:29:16,879 DEBUG [RS:0;4999977c7e1b:43167 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4999977c7e1b:0, corePoolSize=2, maxPoolSize=2 2024-11-10T06:29:16,879 DEBUG [RS:0;4999977c7e1b:43167 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:29:16,880 DEBUG [RS:0;4999977c7e1b:43167 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:29:16,880 DEBUG [RS:0;4999977c7e1b:43167 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:29:16,880 DEBUG [RS:0;4999977c7e1b:43167 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:29:16,880 DEBUG [RS:0;4999977c7e1b:43167 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:29:16,881 DEBUG [RS:0;4999977c7e1b:43167 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:29:16,881 DEBUG [RS:0;4999977c7e1b:43167 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:29:16,881 DEBUG [RS:0;4999977c7e1b:43167 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:29:16,882 INFO [RS:0;4999977c7e1b:43167 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:16,882 INFO [RS:0;4999977c7e1b:43167 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:16,883 INFO [RS:0;4999977c7e1b:43167 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:16,883 INFO [RS:0;4999977c7e1b:43167 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
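Editor's note: the RS_* executor entries above each report a corePoolSize and maxPoolSize. HBase's executor.ExecutorService is its own wrapper (as the log shows), so the JDK snippet below is only an analogy for what those two knobs control on an ordinary thread pool; the class name and keep-alive value are assumptions.

    // Rough JDK analogy only: a plain java.util.concurrent pool with the same
    // corePoolSize/maxPoolSize pair reported for the RS_OPEN_REGION executor above.
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class RsOpenRegionPoolSketch {
      public static void main(String[] args) {
        // corePoolSize=1, maxPoolSize=1 mirror the RS_OPEN_REGION entry above.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.submit(() -> System.out.println("open-region handler would run here"));
        pool.shutdown();
      }
    }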
2024-11-10T06:29:16,883 INFO [RS:0;4999977c7e1b:43167 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:16,883 INFO [RS:0;4999977c7e1b:43167 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,43167,1731220155767-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T06:29:16,902 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T06:29:16,904 INFO [RS:0;4999977c7e1b:43167 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,43167,1731220155767-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:16,905 INFO [RS:0;4999977c7e1b:43167 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:16,905 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.Replication(171): 4999977c7e1b,43167,1731220155767 started 2024-11-10T06:29:16,923 INFO [RS:0;4999977c7e1b:43167 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:16,923 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(1482): Serving as 4999977c7e1b,43167,1731220155767, RpcServer on 4999977c7e1b/172.17.0.2:43167, sessionid=0x10190de9cef0001 2024-11-10T06:29:16,924 DEBUG [RS:0;4999977c7e1b:43167 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T06:29:16,925 DEBUG [RS:0;4999977c7e1b:43167 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4999977c7e1b,43167,1731220155767 2024-11-10T06:29:16,925 DEBUG [RS:0;4999977c7e1b:43167 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,43167,1731220155767' 2024-11-10T06:29:16,925 DEBUG [RS:0;4999977c7e1b:43167 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T06:29:16,926 DEBUG [RS:0;4999977c7e1b:43167 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T06:29:16,927 DEBUG [RS:0;4999977c7e1b:43167 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T06:29:16,927 DEBUG [RS:0;4999977c7e1b:43167 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T06:29:16,927 DEBUG [RS:0;4999977c7e1b:43167 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4999977c7e1b,43167,1731220155767 2024-11-10T06:29:16,927 DEBUG [RS:0;4999977c7e1b:43167 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,43167,1731220155767' 2024-11-10T06:29:16,927 DEBUG [RS:0;4999977c7e1b:43167 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T06:29:16,928 DEBUG [RS:0;4999977c7e1b:43167 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T06:29:16,929 DEBUG [RS:0;4999977c7e1b:43167 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T06:29:16,929 INFO [RS:0;4999977c7e1b:43167 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T06:29:16,929 INFO [RS:0;4999977c7e1b:43167 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-10T06:29:16,975 WARN [4999977c7e1b:40045 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-10T06:29:17,038 INFO [RS:0;4999977c7e1b:43167 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C43167%2C1731220155767, suffix=, logDir=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767, archiveDir=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/oldWALs, maxLogs=32 2024-11-10T06:29:17,040 INFO [RS:0;4999977c7e1b:43167 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43167%2C1731220155767.1731220157040 2024-11-10T06:29:17,049 INFO [RS:0;4999977c7e1b:43167 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220157040 2024-11-10T06:29:17,051 DEBUG [RS:0;4999977c7e1b:43167 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43059:43059),(127.0.0.1/127.0.0.1:41335:41335)] 2024-11-10T06:29:17,227 DEBUG [4999977c7e1b:40045 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-10T06:29:17,240 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=4999977c7e1b,43167,1731220155767 2024-11-10T06:29:17,246 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4999977c7e1b,43167,1731220155767, state=OPENING 2024-11-10T06:29:17,252 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T06:29:17,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:29:17,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:29:17,255 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:29:17,255 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:29:17,256 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T06:29:17,258 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=4999977c7e1b,43167,1731220155767}] 2024-11-10T06:29:17,434 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T06:29:17,437 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47967, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T06:29:17,450 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T06:29:17,450 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:29:17,454 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C43167%2C1731220155767.meta, suffix=.meta, logDir=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767, archiveDir=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/oldWALs, maxLogs=32 2024-11-10T06:29:17,456 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43167%2C1731220155767.meta.1731220157455.meta 2024-11-10T06:29:17,465 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.meta.1731220157455.meta 2024-11-10T06:29:17,466 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41335:41335),(127.0.0.1/127.0.0.1:43059:43059)] 2024-11-10T06:29:17,467 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:29:17,469 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T06:29:17,472 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T06:29:17,477 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-10T06:29:17,482 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T06:29:17,483 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:29:17,483 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T06:29:17,484 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T06:29:17,487 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T06:29:17,488 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T06:29:17,488 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:29:17,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:29:17,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T06:29:17,491 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T06:29:17,491 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:29:17,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:29:17,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T06:29:17,493 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T06:29:17,493 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:29:17,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:29:17,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T06:29:17,496 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T06:29:17,496 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:29:17,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-10T06:29:17,497 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T06:29:17,498 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740 2024-11-10T06:29:17,501 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740 2024-11-10T06:29:17,503 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T06:29:17,503 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T06:29:17,504 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T06:29:17,507 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T06:29:17,509 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=852462, jitterRate=0.08396263420581818}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T06:29:17,509 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T06:29:17,510 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731220157484Writing region info on filesystem at 1731220157484Initializing all the Stores at 1731220157486 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220157486Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220157486Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220157486Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220157486Cleaning up temporary data from old regions at 1731220157503 (+17 ms)Running coprocessor post-open hooks at 1731220157509 (+6 ms)Region opened successfully at 1731220157510 (+1 ms) 2024-11-10T06:29:17,518 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731220157425 2024-11-10T06:29:17,530 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T06:29:17,530 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T06:29:17,532 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=4999977c7e1b,43167,1731220155767 2024-11-10T06:29:17,534 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4999977c7e1b,43167,1731220155767, state=OPEN 2024-11-10T06:29:17,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T06:29:17,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T06:29:17,539 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:29:17,539 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:29:17,539 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=4999977c7e1b,43167,1731220155767 2024-11-10T06:29:17,545 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T06:29:17,545 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=4999977c7e1b,43167,1731220155767 in 281 msec 2024-11-10T06:29:17,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T06:29:17,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 734 msec 2024-11-10T06:29:17,554 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:29:17,554 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T06:29:17,575 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T06:29:17,576 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4999977c7e1b,43167,1731220155767, seqNum=-1] 2024-11-10T06:29:17,598 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T06:29:17,600 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36087, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T06:29:17,620 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 991 msec 2024-11-10T06:29:17,620 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731220157620, completionTime=-1 2024-11-10T06:29:17,623 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-10T06:29:17,623 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-10T06:29:17,651 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-10T06:29:17,651 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731220217651 2024-11-10T06:29:17,652 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731220277652 2024-11-10T06:29:17,652 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 28 msec 2024-11-10T06:29:17,655 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,40045,1731220155094-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:17,655 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,40045,1731220155094-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:17,655 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,40045,1731220155094-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:17,657 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-4999977c7e1b:40045, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T06:29:17,657 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:17,658 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T06:29:17,664 DEBUG [master/4999977c7e1b:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T06:29:17,686 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.777sec 2024-11-10T06:29:17,688 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T06:29:17,689 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T06:29:17,691 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T06:29:17,691 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-10T06:29:17,692 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T06:29:17,692 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,40045,1731220155094-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T06:29:17,693 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,40045,1731220155094-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T06:29:17,702 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T06:29:17,703 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T06:29:17,703 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,40045,1731220155094-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T06:29:17,768 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18b51bbe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:29:17,770 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-10T06:29:17,771 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-10T06:29:17,774 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 4999977c7e1b,40045,-1 for getting cluster id 2024-11-10T06:29:17,777 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T06:29:17,786 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'eac5a620-3c42-479a-a42c-aa8ee1df2bc0' 2024-11-10T06:29:17,789 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T06:29:17,789 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "eac5a620-3c42-479a-a42c-aa8ee1df2bc0" 2024-11-10T06:29:17,792 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6914c27c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:29:17,792 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4999977c7e1b,40045,-1] 2024-11-10T06:29:17,794 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T06:29:17,796 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:29:17,798 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52228, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T06:29:17,801 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@543c1b08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:29:17,802 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T06:29:17,809 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4999977c7e1b,43167,1731220155767, seqNum=-1] 2024-11-10T06:29:17,810 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T06:29:17,812 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48576, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T06:29:17,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=4999977c7e1b,40045,1731220155094 2024-11-10T06:29:17,834 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:29:17,843 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-10T06:29:17,847 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T06:29:17,852 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 4999977c7e1b,40045,1731220155094 2024-11-10T06:29:17,855 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@206c080d 2024-11-10T06:29:17,856 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T06:29:17,859 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52242, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T06:29:17,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40045 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-10T06:29:17,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40045 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-10T06:29:17,885 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40045 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T06:29:17,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40045 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-10T06:29:17,899 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T06:29:17,901 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40045 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-10T06:29:17,901 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:29:17,903 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-10T06:29:17,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40045 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T06:29:17,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741835_1011 (size=389) 2024-11-10T06:29:17,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741835_1011 (size=389) 2024-11-10T06:29:17,956 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7f79a9825bb425f886442caf587a2d2c, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857 2024-11-10T06:29:17,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741836_1012 (size=72) 2024-11-10T06:29:17,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741836_1012 (size=72) 2024-11-10T06:29:17,967 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:29:17,967 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 7f79a9825bb425f886442caf587a2d2c, disabling compactions & flushes 2024-11-10T06:29:17,967 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. 2024-11-10T06:29:17,967 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. 2024-11-10T06:29:17,967 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. after waiting 0 ms 2024-11-10T06:29:17,967 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. 2024-11-10T06:29:17,967 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. 2024-11-10T06:29:17,967 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7f79a9825bb425f886442caf587a2d2c: Waiting for close lock at 1731220157967Disabling compacts and flushes for region at 1731220157967Disabling writes for close at 1731220157967Writing region close event to WAL at 1731220157967Closed at 1731220157967 2024-11-10T06:29:17,969 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T06:29:17,974 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731220157970"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731220157970"}]},"ts":"1731220157970"} 2024-11-10T06:29:17,980 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-10T06:29:17,982 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T06:29:17,985 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731220157982"}]},"ts":"1731220157982"} 2024-11-10T06:29:17,989 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-10T06:29:17,991 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=7f79a9825bb425f886442caf587a2d2c, ASSIGN}] 2024-11-10T06:29:17,993 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=7f79a9825bb425f886442caf587a2d2c, ASSIGN 2024-11-10T06:29:17,995 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=7f79a9825bb425f886442caf587a2d2c, ASSIGN; state=OFFLINE, location=4999977c7e1b,43167,1731220155767; forceNewPlan=false, retain=false 2024-11-10T06:29:18,146 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7f79a9825bb425f886442caf587a2d2c, regionState=OPENING, regionLocation=4999977c7e1b,43167,1731220155767 2024-11-10T06:29:18,151 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=7f79a9825bb425f886442caf587a2d2c, ASSIGN because future has completed 2024-11-10T06:29:18,152 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7f79a9825bb425f886442caf587a2d2c, server=4999977c7e1b,43167,1731220155767}] 2024-11-10T06:29:18,314 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. 
2024-11-10T06:29:18,314 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7f79a9825bb425f886442caf587a2d2c, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c.', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:29:18,315 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:29:18,315 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:29:18,315 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:29:18,315 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:29:18,317 INFO [StoreOpener-7f79a9825bb425f886442caf587a2d2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:29:18,320 INFO [StoreOpener-7f79a9825bb425f886442caf587a2d2c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7f79a9825bb425f886442caf587a2d2c columnFamilyName info 2024-11-10T06:29:18,320 DEBUG [StoreOpener-7f79a9825bb425f886442caf587a2d2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:29:18,321 INFO [StoreOpener-7f79a9825bb425f886442caf587a2d2c-1 {}] regionserver.HStore(327): Store=7f79a9825bb425f886442caf587a2d2c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:29:18,322 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:29:18,323 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:29:18,323 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:29:18,324 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:29:18,324 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:29:18,326 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:29:18,330 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:29:18,330 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7f79a9825bb425f886442caf587a2d2c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=731204, jitterRate=-0.07022729516029358}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T06:29:18,330 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:29:18,331 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7f79a9825bb425f886442caf587a2d2c: Running coprocessor pre-open hook at 1731220158315Writing region info on filesystem at 1731220158315Initializing all the Stores at 1731220158317 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220158317Cleaning up temporary data from old regions at 1731220158324 (+7 ms)Running coprocessor post-open hooks at 1731220158330 (+6 ms)Region opened successfully at 1731220158331 (+1 ms) 2024-11-10T06:29:18,333 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c., pid=6, masterSystemTime=1731220158306 2024-11-10T06:29:18,337 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. 2024-11-10T06:29:18,338 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. 2024-11-10T06:29:18,339 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7f79a9825bb425f886442caf587a2d2c, regionState=OPEN, openSeqNum=2, regionLocation=4999977c7e1b,43167,1731220155767 2024-11-10T06:29:18,343 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7f79a9825bb425f886442caf587a2d2c, server=4999977c7e1b,43167,1731220155767 because future has completed 2024-11-10T06:29:18,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T06:29:18,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7f79a9825bb425f886442caf587a2d2c, server=4999977c7e1b,43167,1731220155767 in 193 msec 2024-11-10T06:29:18,353 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T06:29:18,353 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=7f79a9825bb425f886442caf587a2d2c, ASSIGN in 358 msec 2024-11-10T06:29:18,355 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T06:29:18,355 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731220158355"}]},"ts":"1731220158355"} 2024-11-10T06:29:18,359 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-10T06:29:18,360 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T06:29:18,363 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 471 msec 2024-11-10T06:29:22,986 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-10T06:29:23,032 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-10T06:29:23,034 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-10T06:29:25,499 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-10T06:29:25,500 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-10T06:29:25,501 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-10T06:29:25,501 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-10T06:29:25,502 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T06:29:25,503 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-10T06:29:25,503 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-10T06:29:25,503 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-10T06:29:27,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40045 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T06:29:27,957 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-10T06:29:27,960 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-10T06:29:27,966 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-10T06:29:27,967 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. 
2024-11-10T06:29:27,968 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43167%2C1731220155767.1731220167968 2024-11-10T06:29:27,977 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:29:27,977 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:29:27,978 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:29:27,978 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:29:27,978 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:29:27,978 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220157040 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220167968 2024-11-10T06:29:27,980 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41335:41335),(127.0.0.1/127.0.0.1:43059:43059)] 2024-11-10T06:29:27,980 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220157040 is not closed yet, will try archiving it next time 2024-11-10T06:29:27,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741833_1009 (size=451) 2024-11-10T06:29:27,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741833_1009 (size=451) 2024-11-10T06:29:27,985 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220157040 to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/oldWALs/4999977c7e1b%2C43167%2C1731220155767.1731220157040 2024-11-10T06:29:27,990 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c., hostname=4999977c7e1b,43167,1731220155767, seqNum=2] 2024-11-10T06:29:40,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43167 {}] regionserver.HRegion(8855): Flush requested on 7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:29:40,025 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7f79a9825bb425f886442caf587a2d2c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T06:29:40,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/df914c043a2d4d2abe3078b1f3905392 is 1080, key is row0001/info:/1731220167992/Put/seqid=0 2024-11-10T06:29:40,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741838_1014 (size=12509) 2024-11-10T06:29:40,098 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741838_1014 (size=12509) 2024-11-10T06:29:40,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/df914c043a2d4d2abe3078b1f3905392 2024-11-10T06:29:40,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/df914c043a2d4d2abe3078b1f3905392 as hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/df914c043a2d4d2abe3078b1f3905392 2024-11-10T06:29:40,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/df914c043a2d4d2abe3078b1f3905392, entries=7, sequenceid=11, filesize=12.2 K 2024-11-10T06:29:40,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 7f79a9825bb425f886442caf587a2d2c in 139ms, sequenceid=11, compaction requested=false 2024-11-10T06:29:40,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7f79a9825bb425f886442caf587a2d2c: 2024-11-10T06:29:44,181 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-10T06:29:48,034 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43167%2C1731220155767.1731220188034 2024-11-10T06:29:48,244 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK], DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK]] 2024-11-10T06:29:48,244 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:29:48,244 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:29:48,245 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:29:48,245 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:29:48,245 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:29:48,245 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220167968 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220188034 2024-11-10T06:29:48,247 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41335:41335),(127.0.0.1/127.0.0.1:43059:43059)] 2024-11-10T06:29:48,247 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220167968 is not closed yet, will try archiving it next time 2024-11-10T06:29:48,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741837_1013 (size=12399) 2024-11-10T06:29:48,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741837_1013 (size=12399) 2024-11-10T06:29:48,450 INFO [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK], DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK]] 2024-11-10T06:29:50,654 INFO [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK], DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK]] 2024-11-10T06:29:52,858 INFO [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK], DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK]] 2024-11-10T06:29:55,062 INFO [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK], DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK]] 2024-11-10T06:29:55,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43167 {}] regionserver.HRegion(8855): Flush requested on 7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:29:55,063 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7f79a9825bb425f886442caf587a2d2c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T06:29:55,265 INFO [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK], DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK]] 2024-11-10T06:29:55,271 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/40810b445dd74a36b64a12fb6baac5ef is 1080, key is row0008/info:/1731220182023/Put/seqid=0 2024-11-10T06:29:55,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741840_1016 (size=12509) 2024-11-10T06:29:55,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741840_1016 (size=12509) 2024-11-10T06:29:55,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/40810b445dd74a36b64a12fb6baac5ef 2024-11-10T06:29:55,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/40810b445dd74a36b64a12fb6baac5ef as hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/40810b445dd74a36b64a12fb6baac5ef 2024-11-10T06:29:55,300 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/40810b445dd74a36b64a12fb6baac5ef, entries=7, sequenceid=21, filesize=12.2 K 2024-11-10T06:29:55,502 INFO [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK], DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK]] 2024-11-10T06:29:55,502 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 7f79a9825bb425f886442caf587a2d2c in 
439ms, sequenceid=21, compaction requested=false 2024-11-10T06:29:55,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7f79a9825bb425f886442caf587a2d2c: 2024-11-10T06:29:55,502 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-10T06:29:55,503 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:29:55,503 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/df914c043a2d4d2abe3078b1f3905392 because midkey is the same as first or last row 2024-11-10T06:29:57,267 INFO [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK], DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK]] 2024-11-10T06:29:58,670 INFO [master/4999977c7e1b:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-10T06:29:58,670 INFO [master/4999977c7e1b:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-10T06:29:59,470 INFO [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK], DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK]] 2024-11-10T06:29:59,472 WARN [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK], DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK]] 2024-11-10T06:29:59,473 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 4999977c7e1b%2C43167%2C1731220155767:(num 1731220188034) roll requested 2024-11-10T06:29:59,474 INFO [regionserver/4999977c7e1b:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43167%2C1731220155767.1731220199474 2024-11-10T06:29:59,682 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK], DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK]] 2024-11-10T06:29:59,682 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:29:59,682 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:29:59,682 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:29:59,683 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:29:59,683 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-10T06:29:59,683 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220188034 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220199474 2024-11-10T06:29:59,684 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43059:43059),(127.0.0.1/127.0.0.1:41335:41335)] 2024-11-10T06:29:59,684 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220188034 is not closed yet, will try archiving it next time 2024-11-10T06:29:59,684 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220167968 to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/oldWALs/4999977c7e1b%2C43167%2C1731220155767.1731220167968 2024-11-10T06:29:59,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741839_1015 (size=7739) 2024-11-10T06:29:59,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741839_1015 (size=7739) 2024-11-10T06:30:01,674 INFO [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK], DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK]] 2024-11-10T06:30:03,315 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7f79a9825bb425f886442caf587a2d2c, had cached 0 bytes from a total of 25018 2024-11-10T06:30:03,878 INFO [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK], DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK]] 2024-11-10T06:30:06,083 INFO [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK], DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK]] 2024-11-10T06:30:08,286 INFO [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK], 
DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK]] 2024-11-10T06:30:10,288 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T06:30:10,289 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43167%2C1731220155767.1731220210289 2024-11-10T06:30:14,181 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-10T06:30:15,297 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK], DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK]] 2024-11-10T06:30:15,299 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK], DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK]] 2024-11-10T06:30:15,300 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 4999977c7e1b%2C43167%2C1731220155767:(num 1731220210289) roll requested 2024-11-10T06:30:15,300 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:15,300 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:15,300 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:15,300 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:15,300 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:15,301 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220199474 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220210289 2024-11-10T06:30:15,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741841_1017 (size=4753) 2024-11-10T06:30:15,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741841_1017 (size=4753) 2024-11-10T06:30:15,304 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43059:43059),(127.0.0.1/127.0.0.1:41335:41335)] 2024-11-10T06:30:15,304 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220199474 is not closed yet, will try archiving it next time 2024-11-10T06:30:15,304 INFO [regionserver/4999977c7e1b:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43167%2C1731220155767.1731220215304 2024-11-10T06:30:20,307 INFO [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK], DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK]] 2024-11-10T06:30:20,308 WARN [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK], DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK]] 2024-11-10T06:30:20,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43167 {}] regionserver.HRegion(8855): Flush requested on 7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:30:20,308 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7f79a9825bb425f886442caf587a2d2c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T06:30:20,314 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK], DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK]] 2024-11-10T06:30:20,314 WARN [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK], DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK]] 2024-11-10T06:30:22,309 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T06:30:25,310 INFO [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK], DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK]] 2024-11-10T06:30:25,310 WARN [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK], DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK]] 2024-11-10T06:30:25,311 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:25,311 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:25,311 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:25,311 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:25,311 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:25,312 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220210289 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220215304 2024-11-10T06:30:25,312 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41335:41335),(127.0.0.1/127.0.0.1:43059:43059)] 2024-11-10T06:30:25,313 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220210289 is not closed yet, will try archiving it next time 2024-11-10T06:30:25,313 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 4999977c7e1b%2C43167%2C1731220155767:(num 1731220215304) roll requested 2024-11-10T06:30:25,313 INFO [regionserver/4999977c7e1b:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43167%2C1731220155767.1731220225313 2024-11-10T06:30:25,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741842_1018 (size=1569) 2024-11-10T06:30:25,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741842_1018 (size=1569) 2024-11-10T06:30:25,318 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/a953cd82d902426dbb794c2fd3904254 is 1080, key is row0015/info:/1731220197065/Put/seqid=0 2024-11-10T06:30:25,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741844_1020 (size=12509) 2024-11-10T06:30:25,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741844_1020 (size=12509) 2024-11-10T06:30:25,326 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/a953cd82d902426dbb794c2fd3904254 2024-11-10T06:30:25,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/a953cd82d902426dbb794c2fd3904254 as hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/a953cd82d902426dbb794c2fd3904254 2024-11-10T06:30:25,346 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/a953cd82d902426dbb794c2fd3904254, entries=7, sequenceid=31, filesize=12.2 K 2024-11-10T06:30:30,321 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK], DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK]] 2024-11-10T06:30:30,321 WARN [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK], DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK]] 2024-11-10T06:30:30,348 INFO [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK], DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK]] 2024-11-10T06:30:30,348 WARN [FSHLog-0-hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857-prefix:4999977c7e1b,43167,1731220155767 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43049,DS-129cc576-bc45-48ab-aa69-bea9b900eeb9,DISK], DatanodeInfoWithStorage[127.0.0.1:38839,DS-3fafae90-837c-4050-ac38-5df1b7d896e2,DISK]] 2024-11-10T06:30:30,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 7f79a9825bb425f886442caf587a2d2c in 10040ms, sequenceid=31, compaction requested=true 2024-11-10T06:30:30,348 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:30,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7f79a9825bb425f886442caf587a2d2c: 2024-11-10T06:30:30,348 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:30,348 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:30,349 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-10T06:30:30,349 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:30:30,349 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:30,349 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/df914c043a2d4d2abe3078b1f3905392 because midkey is the same as first or last row 2024-11-10T06:30:30,349 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:30,349 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220215304 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220225313 2024-11-10T06:30:30,350 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:41335:41335),(127.0.0.1/127.0.0.1:43059:43059)] 2024-11-10T06:30:30,350 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220215304 is not closed yet, will try archiving it next time 2024-11-10T06:30:30,350 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 4999977c7e1b%2C43167%2C1731220155767:(num 1731220230350) roll requested 2024-11-10T06:30:30,350 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220188034 to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/oldWALs/4999977c7e1b%2C43167%2C1731220155767.1731220188034 2024-11-10T06:30:30,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f79a9825bb425f886442caf587a2d2c:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T06:30:30,350 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43167%2C1731220155767.1731220230350 2024-11-10T06:30:30,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741843_1019 (size=438) 2024-11-10T06:30:30,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741843_1019 (size=438) 2024-11-10T06:30:30,353 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220199474 to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/oldWALs/4999977c7e1b%2C43167%2C1731220155767.1731220199474 2024-11-10T06:30:30,353 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T06:30:30,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:30:30,355 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220210289 to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/oldWALs/4999977c7e1b%2C43167%2C1731220155767.1731220210289 2024-11-10T06:30:30,357 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T06:30:30,357 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220215304 to 
hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/oldWALs/4999977c7e1b%2C43167%2C1731220155767.1731220215304 2024-11-10T06:30:30,358 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.HStore(1541): 7f79a9825bb425f886442caf587a2d2c/info is initiating minor compaction (all files) 2024-11-10T06:30:30,358 INFO [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 7f79a9825bb425f886442caf587a2d2c/info in TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. 2024-11-10T06:30:30,359 INFO [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/df914c043a2d4d2abe3078b1f3905392, hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/40810b445dd74a36b64a12fb6baac5ef, hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/a953cd82d902426dbb794c2fd3904254] into tmpdir=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp, totalSize=36.6 K 2024-11-10T06:30:30,360 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:30,360 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:30,360 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] compactions.Compactor(225): Compacting df914c043a2d4d2abe3078b1f3905392, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731220167992 2024-11-10T06:30:30,360 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:30,360 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:30,360 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:30,360 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220225313 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220230350 2024-11-10T06:30:30,361 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] compactions.Compactor(225): Compacting 40810b445dd74a36b64a12fb6baac5ef, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731220182023 2024-11-10T06:30:30,361 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43059:43059),(127.0.0.1/127.0.0.1:41335:41335)] 2024-11-10T06:30:30,362 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220225313 is not closed yet, will try archiving it next time 2024-11-10T06:30:30,362 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] compactions.Compactor(225): Compacting a953cd82d902426dbb794c2fd3904254, keycount=7, bloomtype=ROW, 
size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731220197065 2024-11-10T06:30:30,362 INFO [regionserver/4999977c7e1b:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43167%2C1731220155767.1731220230362 2024-11-10T06:30:30,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741845_1021 (size=93) 2024-11-10T06:30:30,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741845_1021 (size=93) 2024-11-10T06:30:30,367 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220225313 to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/oldWALs/4999977c7e1b%2C43167%2C1731220155767.1731220225313 2024-11-10T06:30:30,373 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:30,373 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:30,373 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:30,374 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:30,374 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:30,374 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220230350 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220230362 2024-11-10T06:30:30,375 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41335:41335),(127.0.0.1/127.0.0.1:43059:43059)] 2024-11-10T06:30:30,375 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/WALs/4999977c7e1b,43167,1731220155767/4999977c7e1b%2C43167%2C1731220155767.1731220230350 is not closed yet, will try archiving it next time 2024-11-10T06:30:30,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741846_1022 (size=1258) 2024-11-10T06:30:30,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741846_1022 (size=1258) 2024-11-10T06:30:30,391 INFO [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f79a9825bb425f886442caf587a2d2c#info#compaction#3 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T06:30:30,392 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/c630046faf0e4ad5ad014f4a3d98fa62 is 1080, key is row0001/info:/1731220167992/Put/seqid=0 2024-11-10T06:30:30,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741848_1024 (size=27710) 2024-11-10T06:30:30,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741848_1024 (size=27710) 2024-11-10T06:30:30,410 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/c630046faf0e4ad5ad014f4a3d98fa62 as hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/c630046faf0e4ad5ad014f4a3d98fa62 2024-11-10T06:30:30,426 INFO [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7f79a9825bb425f886442caf587a2d2c/info of 7f79a9825bb425f886442caf587a2d2c into c630046faf0e4ad5ad014f4a3d98fa62(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T06:30:30,426 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7f79a9825bb425f886442caf587a2d2c: 2024-11-10T06:30:30,428 INFO [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c., storeName=7f79a9825bb425f886442caf587a2d2c/info, priority=13, startTime=1731220230350; duration=0sec 2024-11-10T06:30:30,429 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-10T06:30:30,429 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:30:30,429 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/c630046faf0e4ad5ad014f4a3d98fa62 because midkey is the same as first or last row 2024-11-10T06:30:30,429 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-10T06:30:30,429 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:30:30,429 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/c630046faf0e4ad5ad014f4a3d98fa62 because midkey is the same as first or last row 2024-11-10T06:30:30,430 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-10T06:30:30,430 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:30:30,430 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/c630046faf0e4ad5ad014f4a3d98fa62 because midkey is the same as first or last row 2024-11-10T06:30:30,430 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:30:30,430 DEBUG [RS:0;4999977c7e1b:43167-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f79a9825bb425f886442caf587a2d2c:info 2024-11-10T06:30:42,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43167 {}] regionserver.HRegion(8855): Flush requested on 7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:30:42,386 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7f79a9825bb425f886442caf587a2d2c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T06:30:42,393 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/d5e37cd2d8ea4d7fa481e70999585e09 is 1080, key is row0022/info:/1731220230363/Put/seqid=0 2024-11-10T06:30:42,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741849_1025 (size=12509) 2024-11-10T06:30:42,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741849_1025 (size=12509) 2024-11-10T06:30:42,405 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/d5e37cd2d8ea4d7fa481e70999585e09 2024-11-10T06:30:42,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/d5e37cd2d8ea4d7fa481e70999585e09 as hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/d5e37cd2d8ea4d7fa481e70999585e09 2024-11-10T06:30:42,423 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/d5e37cd2d8ea4d7fa481e70999585e09, entries=7, sequenceid=42, filesize=12.2 K 2024-11-10T06:30:42,424 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 7f79a9825bb425f886442caf587a2d2c in 38ms, sequenceid=42, compaction requested=false 2024-11-10T06:30:42,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7f79a9825bb425f886442caf587a2d2c: 2024-11-10T06:30:42,425 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-10T06:30:42,425 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:30:42,425 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/c630046faf0e4ad5ad014f4a3d98fa62 because midkey is the same as first or last row 2024-11-10T06:30:44,181 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-10T06:30:48,315 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7f79a9825bb425f886442caf587a2d2c, had cached 0 bytes from a total of 40219 2024-11-10T06:30:50,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T06:30:50,397 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-10T06:30:50,397 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:30:50,403 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:30:50,403 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:30:50,403 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-10T06:30:50,403 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T06:30:50,404 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=933253213, stopped=false 2024-11-10T06:30:50,404 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=4999977c7e1b,40045,1731220155094 2024-11-10T06:30:50,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T06:30:50,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T06:30:50,407 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T06:30:50,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:50,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:50,407 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T06:30:50,407 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:30:50,407 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:30:50,408 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:30:50,408 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4999977c7e1b,43167,1731220155767' ***** 2024-11-10T06:30:50,408 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T06:30:50,408 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:30:50,408 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T06:30:50,408 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T06:30:50,408 INFO [RS:0;4999977c7e1b:43167 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T06:30:50,409 INFO [RS:0;4999977c7e1b:43167 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T06:30:50,409 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(3091): Received CLOSE for 7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:30:50,409 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(959): stopping server 4999977c7e1b,43167,1731220155767 2024-11-10T06:30:50,409 INFO [RS:0;4999977c7e1b:43167 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T06:30:50,409 INFO [RS:0;4999977c7e1b:43167 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;4999977c7e1b:43167. 
2024-11-10T06:30:50,410 DEBUG [RS:0;4999977c7e1b:43167 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:30:50,410 DEBUG [RS:0;4999977c7e1b:43167 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:30:50,410 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7f79a9825bb425f886442caf587a2d2c, disabling compactions & flushes 2024-11-10T06:30:50,410 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T06:30:50,410 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T06:30:50,410 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. 2024-11-10T06:30:50,410 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T06:30:50,410 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. 2024-11-10T06:30:50,410 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. after waiting 0 ms 2024-11-10T06:30:50,410 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T06:30:50,410 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. 
2024-11-10T06:30:50,410 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 7f79a9825bb425f886442caf587a2d2c 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-10T06:30:50,410 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-10T06:30:50,410 DEBUG [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 7f79a9825bb425f886442caf587a2d2c=TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c.} 2024-11-10T06:30:50,410 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T06:30:50,411 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T06:30:50,411 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T06:30:50,411 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T06:30:50,411 DEBUG [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7f79a9825bb425f886442caf587a2d2c 2024-11-10T06:30:50,411 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T06:30:50,411 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-10T06:30:50,417 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/5f0303a91a7947a484a8884a28855cdb is 1080, key is row0029/info:/1731220244388/Put/seqid=0 2024-11-10T06:30:50,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741850_1026 (size=8193) 2024-11-10T06:30:50,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741850_1026 (size=8193) 2024-11-10T06:30:50,425 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/5f0303a91a7947a484a8884a28855cdb 2024-11-10T06:30:50,433 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/.tmp/info/5f0303a91a7947a484a8884a28855cdb as 
hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/5f0303a91a7947a484a8884a28855cdb 2024-11-10T06:30:50,435 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/.tmp/info/b8b092a0aeab4c7fbd126543ad53d58c is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c./info:regioninfo/1731220158338/Put/seqid=0 2024-11-10T06:30:50,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741851_1027 (size=7016) 2024-11-10T06:30:50,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741851_1027 (size=7016) 2024-11-10T06:30:50,443 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/5f0303a91a7947a484a8884a28855cdb, entries=3, sequenceid=48, filesize=8.0 K 2024-11-10T06:30:50,443 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/.tmp/info/b8b092a0aeab4c7fbd126543ad53d58c 2024-11-10T06:30:50,444 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 7f79a9825bb425f886442caf587a2d2c in 34ms, sequenceid=48, compaction requested=true 2024-11-10T06:30:50,445 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/df914c043a2d4d2abe3078b1f3905392, hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/40810b445dd74a36b64a12fb6baac5ef, hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/a953cd82d902426dbb794c2fd3904254] to archive 2024-11-10T06:30:50,448 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-10T06:30:50,451 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/df914c043a2d4d2abe3078b1f3905392 to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/archive/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/df914c043a2d4d2abe3078b1f3905392 2024-11-10T06:30:50,453 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/40810b445dd74a36b64a12fb6baac5ef to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/archive/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/40810b445dd74a36b64a12fb6baac5ef 2024-11-10T06:30:50,455 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/a953cd82d902426dbb794c2fd3904254 to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/archive/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/info/a953cd82d902426dbb794c2fd3904254 2024-11-10T06:30:50,467 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/.tmp/ns/7883f4d2a5404df2b0f166f98afc4e71 is 43, key is default/ns:d/1731220157605/Put/seqid=0 2024-11-10T06:30:50,468 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=4999977c7e1b:40045 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-10T06:30:50,473 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [df914c043a2d4d2abe3078b1f3905392=12509, 40810b445dd74a36b64a12fb6baac5ef=12509, a953cd82d902426dbb794c2fd3904254=12509] 2024-11-10T06:30:50,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741852_1028 (size=5153) 2024-11-10T06:30:50,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741852_1028 (size=5153) 2024-11-10T06:30:50,475 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/.tmp/ns/7883f4d2a5404df2b0f166f98afc4e71 2024-11-10T06:30:50,479 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/default/TestLogRolling-testSlowSyncLogRolling/7f79a9825bb425f886442caf587a2d2c/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-10T06:30:50,482 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. 2024-11-10T06:30:50,482 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7f79a9825bb425f886442caf587a2d2c: Waiting for close lock at 1731220250409Running coprocessor pre-close hooks at 1731220250410 (+1 ms)Disabling compacts and flushes for region at 1731220250410Disabling writes for close at 1731220250410Obtaining lock to block concurrent updates at 1731220250410Preparing flush snapshotting stores in 7f79a9825bb425f886442caf587a2d2c at 1731220250410Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731220250411 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. at 1731220250412 (+1 ms)Flushing 7f79a9825bb425f886442caf587a2d2c/info: creating writer at 1731220250412Flushing 7f79a9825bb425f886442caf587a2d2c/info: appending metadata at 1731220250417 (+5 ms)Flushing 7f79a9825bb425f886442caf587a2d2c/info: closing flushed file at 1731220250417Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8e8015d: reopening flushed file at 1731220250432 (+15 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 7f79a9825bb425f886442caf587a2d2c in 34ms, sequenceid=48, compaction requested=true at 1731220250444 (+12 ms)Writing region close event to WAL at 1731220250474 (+30 ms)Running coprocessor post-close hooks at 1731220250480 (+6 ms)Closed at 1731220250482 (+2 ms) 2024-11-10T06:30:50,483 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731220157860.7f79a9825bb425f886442caf587a2d2c. 
2024-11-10T06:30:50,499 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/.tmp/table/8f6ecab6f939462fb1e1b21590b4c36c is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731220158355/Put/seqid=0 2024-11-10T06:30:50,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741853_1029 (size=5396) 2024-11-10T06:30:50,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741853_1029 (size=5396) 2024-11-10T06:30:50,507 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/.tmp/table/8f6ecab6f939462fb1e1b21590b4c36c 2024-11-10T06:30:50,516 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/.tmp/info/b8b092a0aeab4c7fbd126543ad53d58c as hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/info/b8b092a0aeab4c7fbd126543ad53d58c 2024-11-10T06:30:50,524 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/info/b8b092a0aeab4c7fbd126543ad53d58c, entries=10, sequenceid=11, filesize=6.9 K 2024-11-10T06:30:50,526 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/.tmp/ns/7883f4d2a5404df2b0f166f98afc4e71 as hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/ns/7883f4d2a5404df2b0f166f98afc4e71 2024-11-10T06:30:50,534 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/ns/7883f4d2a5404df2b0f166f98afc4e71, entries=2, sequenceid=11, filesize=5.0 K 2024-11-10T06:30:50,535 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/.tmp/table/8f6ecab6f939462fb1e1b21590b4c36c as hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/table/8f6ecab6f939462fb1e1b21590b4c36c 2024-11-10T06:30:50,543 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/table/8f6ecab6f939462fb1e1b21590b4c36c, entries=2, sequenceid=11, filesize=5.3 K 2024-11-10T06:30:50,545 INFO 
[RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 134ms, sequenceid=11, compaction requested=false 2024-11-10T06:30:50,551 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-10T06:30:50,552 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T06:30:50,552 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T06:30:50,552 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731220250410Running coprocessor pre-close hooks at 1731220250410Disabling compacts and flushes for region at 1731220250410Disabling writes for close at 1731220250411 (+1 ms)Obtaining lock to block concurrent updates at 1731220250411Preparing flush snapshotting stores in 1588230740 at 1731220250411Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731220250411Flushing stores of hbase:meta,,1.1588230740 at 1731220250412 (+1 ms)Flushing 1588230740/info: creating writer at 1731220250413 (+1 ms)Flushing 1588230740/info: appending metadata at 1731220250435 (+22 ms)Flushing 1588230740/info: closing flushed file at 1731220250435Flushing 1588230740/ns: creating writer at 1731220250451 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731220250467 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731220250467Flushing 1588230740/table: creating writer at 1731220250483 (+16 ms)Flushing 1588230740/table: appending metadata at 1731220250499 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731220250499Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1279f7ae: reopening flushed file at 1731220250514 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c94520a: reopening flushed file at 1731220250525 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50d1286d: reopening flushed file at 1731220250534 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 134ms, sequenceid=11, compaction requested=false at 1731220250545 (+11 ms)Writing region close event to WAL at 1731220250546 (+1 ms)Running coprocessor post-close hooks at 1731220250552 (+6 ms)Closed at 1731220250552 2024-11-10T06:30:50,552 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T06:30:50,611 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(976): stopping server 4999977c7e1b,43167,1731220155767; all regions closed. 
2024-11-10T06:30:50,613 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:50,613 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:50,613 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:50,613 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:50,614 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:50,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741834_1010 (size=3066) 2024-11-10T06:30:50,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741834_1010 (size=3066) 2024-11-10T06:30:50,620 DEBUG [RS:0;4999977c7e1b:43167 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/oldWALs 2024-11-10T06:30:50,620 INFO [RS:0;4999977c7e1b:43167 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4999977c7e1b%2C43167%2C1731220155767.meta:.meta(num 1731220157455) 2024-11-10T06:30:50,621 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:50,621 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:50,621 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:50,621 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:50,621 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:50,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741847_1023 (size=12695) 2024-11-10T06:30:50,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741847_1023 (size=12695) 2024-11-10T06:30:50,887 INFO [regionserver/4999977c7e1b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T06:30:50,967 INFO [regionserver/4999977c7e1b:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-10T06:30:50,967 INFO [regionserver/4999977c7e1b:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-10T06:30:51,029 DEBUG [RS:0;4999977c7e1b:43167 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/oldWALs 2024-11-10T06:30:51,029 INFO [RS:0;4999977c7e1b:43167 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4999977c7e1b%2C43167%2C1731220155767:(num 1731220230362) 2024-11-10T06:30:51,029 DEBUG [RS:0;4999977c7e1b:43167 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:30:51,029 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T06:30:51,029 INFO [RS:0;4999977c7e1b:43167 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T06:30:51,029 INFO [RS:0;4999977c7e1b:43167 {}] hbase.ChoreService(370): Chore service for: regionserver/4999977c7e1b:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T06:30:51,030 INFO [RS:0;4999977c7e1b:43167 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T06:30:51,030 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T06:30:51,030 INFO [RS:0;4999977c7e1b:43167 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43167 2024-11-10T06:30:51,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4999977c7e1b,43167,1731220155767 2024-11-10T06:30:51,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T06:30:51,034 INFO [RS:0;4999977c7e1b:43167 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T06:30:51,036 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4999977c7e1b,43167,1731220155767] 2024-11-10T06:30:51,038 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4999977c7e1b,43167,1731220155767 already deleted, retry=false 2024-11-10T06:30:51,038 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4999977c7e1b,43167,1731220155767 expired; onlineServers=0 2024-11-10T06:30:51,038 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '4999977c7e1b,40045,1731220155094' ***** 2024-11-10T06:30:51,038 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T06:30:51,038 INFO [M:0;4999977c7e1b:40045 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T06:30:51,038 INFO [M:0;4999977c7e1b:40045 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T06:30:51,038 DEBUG [M:0;4999977c7e1b:40045 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T06:30:51,038 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-10T06:30:51,038 DEBUG [M:0;4999977c7e1b:40045 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T06:30:51,039 DEBUG [master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220156729 {}] cleaner.HFileCleaner(306): Exit Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220156729,5,FailOnTimeoutGroup] 2024-11-10T06:30:51,039 DEBUG [master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220156725 {}] cleaner.HFileCleaner(306): Exit Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220156725,5,FailOnTimeoutGroup] 2024-11-10T06:30:51,039 INFO [M:0;4999977c7e1b:40045 {}] hbase.ChoreService(370): Chore service for: master/4999977c7e1b:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T06:30:51,039 INFO [M:0;4999977c7e1b:40045 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T06:30:51,039 DEBUG [M:0;4999977c7e1b:40045 {}] master.HMaster(1795): Stopping service threads 2024-11-10T06:30:51,039 INFO [M:0;4999977c7e1b:40045 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T06:30:51,039 INFO [M:0;4999977c7e1b:40045 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T06:30:51,040 INFO [M:0;4999977c7e1b:40045 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T06:30:51,040 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T06:30:51,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T06:30:51,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:51,041 DEBUG [M:0;4999977c7e1b:40045 {}] zookeeper.ZKUtil(347): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T06:30:51,041 WARN [M:0;4999977c7e1b:40045 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T06:30:51,041 INFO [M:0;4999977c7e1b:40045 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/.lastflushedseqids 2024-11-10T06:30:51,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741854_1030 (size=130) 2024-11-10T06:30:51,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741854_1030 (size=130) 2024-11-10T06:30:51,054 INFO [M:0;4999977c7e1b:40045 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T06:30:51,055 INFO [M:0;4999977c7e1b:40045 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T06:30:51,055 DEBUG [M:0;4999977c7e1b:40045 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T06:30:51,055 INFO [M:0;4999977c7e1b:40045 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:30:51,055 DEBUG [M:0;4999977c7e1b:40045 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:30:51,055 DEBUG [M:0;4999977c7e1b:40045 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T06:30:51,055 DEBUG [M:0;4999977c7e1b:40045 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:30:51,055 INFO [M:0;4999977c7e1b:40045 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-10T06:30:51,074 DEBUG [M:0;4999977c7e1b:40045 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/afc3a14a15644cc58289346c4d5fd28a is 82, key is hbase:meta,,1/info:regioninfo/1731220157531/Put/seqid=0 2024-11-10T06:30:51,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741855_1031 (size=5672) 2024-11-10T06:30:51,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741855_1031 (size=5672) 2024-11-10T06:30:51,080 INFO [M:0;4999977c7e1b:40045 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/afc3a14a15644cc58289346c4d5fd28a 2024-11-10T06:30:51,106 DEBUG [M:0;4999977c7e1b:40045 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ee9299ca73da4a7abf004e37e2e6a2c9 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731220158362/Put/seqid=0 2024-11-10T06:30:51,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741856_1032 (size=6248) 2024-11-10T06:30:51,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741856_1032 (size=6248) 2024-11-10T06:30:51,113 INFO [M:0;4999977c7e1b:40045 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ee9299ca73da4a7abf004e37e2e6a2c9 2024-11-10T06:30:51,121 INFO [M:0;4999977c7e1b:40045 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ee9299ca73da4a7abf004e37e2e6a2c9 2024-11-10T06:30:51,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:30:51,137 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43167-0x10190de9cef0001, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:30:51,137 INFO [RS:0;4999977c7e1b:43167 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T06:30:51,137 INFO [RS:0;4999977c7e1b:43167 {}] regionserver.HRegionServer(1031): Exiting; stopping=4999977c7e1b,43167,1731220155767; zookeeper connection closed. 2024-11-10T06:30:51,137 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@62da0b80 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@62da0b80 2024-11-10T06:30:51,138 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-10T06:30:51,144 DEBUG [M:0;4999977c7e1b:40045 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2ea532db0488407caf007516bd8ae8db is 69, key is 4999977c7e1b,43167,1731220155767/rs:state/1731220156803/Put/seqid=0 2024-11-10T06:30:51,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741857_1033 (size=5156) 2024-11-10T06:30:51,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741857_1033 (size=5156) 2024-11-10T06:30:51,151 INFO [M:0;4999977c7e1b:40045 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2ea532db0488407caf007516bd8ae8db 2024-11-10T06:30:51,175 DEBUG [M:0;4999977c7e1b:40045 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4f248af2774f4af2ad7ffa439b84d916 is 52, key is load_balancer_on/state:d/1731220157839/Put/seqid=0 2024-11-10T06:30:51,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741858_1034 (size=5056) 2024-11-10T06:30:51,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741858_1034 (size=5056) 2024-11-10T06:30:51,183 INFO [M:0;4999977c7e1b:40045 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4f248af2774f4af2ad7ffa439b84d916 2024-11-10T06:30:51,190 DEBUG [M:0;4999977c7e1b:40045 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/afc3a14a15644cc58289346c4d5fd28a as hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/afc3a14a15644cc58289346c4d5fd28a 2024-11-10T06:30:51,197 INFO [M:0;4999977c7e1b:40045 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/afc3a14a15644cc58289346c4d5fd28a, entries=8, sequenceid=59, filesize=5.5 K 2024-11-10T06:30:51,199 DEBUG [M:0;4999977c7e1b:40045 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ee9299ca73da4a7abf004e37e2e6a2c9 as hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ee9299ca73da4a7abf004e37e2e6a2c9 2024-11-10T06:30:51,207 INFO [M:0;4999977c7e1b:40045 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ee9299ca73da4a7abf004e37e2e6a2c9 2024-11-10T06:30:51,207 INFO [M:0;4999977c7e1b:40045 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ee9299ca73da4a7abf004e37e2e6a2c9, entries=6, sequenceid=59, filesize=6.1 K 2024-11-10T06:30:51,209 DEBUG [M:0;4999977c7e1b:40045 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2ea532db0488407caf007516bd8ae8db as hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2ea532db0488407caf007516bd8ae8db 2024-11-10T06:30:51,216 INFO [M:0;4999977c7e1b:40045 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2ea532db0488407caf007516bd8ae8db, entries=1, sequenceid=59, filesize=5.0 K 2024-11-10T06:30:51,218 DEBUG [M:0;4999977c7e1b:40045 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4f248af2774f4af2ad7ffa439b84d916 as hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4f248af2774f4af2ad7ffa439b84d916 2024-11-10T06:30:51,225 INFO [M:0;4999977c7e1b:40045 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4f248af2774f4af2ad7ffa439b84d916, entries=1, sequenceid=59, filesize=4.9 K 2024-11-10T06:30:51,226 INFO [M:0;4999977c7e1b:40045 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 171ms, sequenceid=59, compaction requested=false 2024-11-10T06:30:51,228 INFO [M:0;4999977c7e1b:40045 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
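For reference, the flush sizes reported above are the raw byte counts divided by 1024: 23588 B / 1024 ≈ 23.04 KB (dataSize) and 29848 B / 1024 ≈ 29.15 KB (heapSize), matching the figures in the "Finished flush" summary for region 1595e783b53d99cd5eef43b6debb2682.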
2024-11-10T06:30:51,228 DEBUG [M:0;4999977c7e1b:40045 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731220251055Disabling compacts and flushes for region at 1731220251055Disabling writes for close at 1731220251055Obtaining lock to block concurrent updates at 1731220251055Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731220251055Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1731220251056 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731220251056Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731220251057 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731220251074 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731220251074Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731220251088 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731220251105 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731220251105Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731220251121 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731220251143 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731220251143Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731220251158 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731220251174 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731220251174Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f0d50cf: reopening flushed file at 1731220251189 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@745febb9: reopening flushed file at 1731220251198 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@303f5b41: reopening flushed file at 1731220251208 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@780a6903: reopening flushed file at 1731220251217 (+9 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 171ms, sequenceid=59, compaction requested=false at 1731220251226 (+9 ms)Writing region close event to WAL at 1731220251228 (+2 ms)Closed at 1731220251228 2024-11-10T06:30:51,229 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:51,230 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:51,230 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:51,230 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:51,230 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:51,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38839 is added to blk_1073741830_1006 (size=27985) 2024-11-10T06:30:51,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43049 is added to blk_1073741830_1006 (size=27985) 2024-11-10T06:30:51,234 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T06:30:51,234 INFO [M:0;4999977c7e1b:40045 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-10T06:30:51,234 INFO [M:0;4999977c7e1b:40045 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40045 2024-11-10T06:30:51,234 INFO [M:0;4999977c7e1b:40045 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T06:30:51,336 INFO [M:0;4999977c7e1b:40045 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T06:30:51,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:30:51,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40045-0x10190de9cef0000, quorum=127.0.0.1:56126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:30:51,341 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78ec6c63{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:30:51,344 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@633713c3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:30:51,344 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:30:51,344 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29058af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:30:51,344 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@154eec55{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/hadoop.log.dir/,STOPPED} 2024-11-10T06:30:51,347 WARN [BP-965741819-172.17.0.2-1731220152180 heartbeating to localhost/127.0.0.1:37837 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:30:51,347 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T06:30:51,347 WARN [BP-965741819-172.17.0.2-1731220152180 heartbeating to localhost/127.0.0.1:37837 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-965741819-172.17.0.2-1731220152180 (Datanode Uuid 550bacff-55a2-4337-a11d-699b6ff63ea4) service to localhost/127.0.0.1:37837 2024-11-10T06:30:51,347 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:30:51,349 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/cluster_7c3bcd10-8fd8-bc58-53c9-8bdeb6c9f462/data/data3/current/BP-965741819-172.17.0.2-1731220152180 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:30:51,349 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/cluster_7c3bcd10-8fd8-bc58-53c9-8bdeb6c9f462/data/data4/current/BP-965741819-172.17.0.2-1731220152180 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:30:51,350 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:30:51,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b32dfc5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:30:51,352 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43ebd249{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:30:51,353 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:30:51,353 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e911877{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:30:51,353 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@460757e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/hadoop.log.dir/,STOPPED} 2024-11-10T06:30:51,355 WARN [BP-965741819-172.17.0.2-1731220152180 heartbeating to localhost/127.0.0.1:37837 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:30:51,355 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T06:30:51,356 WARN [BP-965741819-172.17.0.2-1731220152180 heartbeating to localhost/127.0.0.1:37837 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-965741819-172.17.0.2-1731220152180 (Datanode Uuid 41ec3bb4-9674-468e-9c95-a17c2fd23f96) service to localhost/127.0.0.1:37837 2024-11-10T06:30:51,356 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:30:51,356 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/cluster_7c3bcd10-8fd8-bc58-53c9-8bdeb6c9f462/data/data1/current/BP-965741819-172.17.0.2-1731220152180 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:30:51,356 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/cluster_7c3bcd10-8fd8-bc58-53c9-8bdeb6c9f462/data/data2/current/BP-965741819-172.17.0.2-1731220152180 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:30:51,357 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:30:51,368 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c1a236c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T06:30:51,369 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a625720{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:30:51,369 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:30:51,369 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae7f863{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:30:51,369 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c5202f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/hadoop.log.dir/,STOPPED} 2024-11-10T06:30:51,378 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T06:30:51,411 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T06:30:51,421 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=78 (was 12) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:37837 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:37837 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37837 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37837 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@7d305265 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:37837 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37837 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:37837 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/4999977c7e1b:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: master/4999977c7e1b:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/4999977c7e1b:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37837 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=7 (was 5) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7628 (was 8170) 2024-11-10T06:30:51,428 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=79, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=7, ProcessCount=11, AvailableMemoryMB=7628 2024-11-10T06:30:51,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T06:30:51,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/hadoop.log.dir so I do NOT create it in target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de 2024-11-10T06:30:51,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4d55a94e-5807-0c6d-cb75-81d498f40969/hadoop.tmp.dir so I do NOT create it in target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de 2024-11-10T06:30:51,429 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/cluster_36a5731b-0275-4d73-7c1f-12ead882a745, deleteOnExit=true 2024-11-10T06:30:51,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-10T06:30:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/test.cache.data in system properties and HBase conf 2024-11-10T06:30:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T06:30:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/hadoop.log.dir in system properties and HBase conf 2024-11-10T06:30:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T06:30:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T06:30:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T06:30:51,430 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-10T06:30:51,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T06:30:51,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T06:30:51,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T06:30:51,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T06:30:51,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T06:30:51,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T06:30:51,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T06:30:51,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T06:30:51,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T06:30:51,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/nfs.dump.dir in system properties and HBase conf 2024-11-10T06:30:51,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/java.io.tmpdir in system properties and HBase conf 2024-11-10T06:30:51,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T06:30:51,432 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T06:30:51,432 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T06:30:51,447 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T06:30:51,517 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:30:51,524 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:30:51,525 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:30:51,525 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:30:51,525 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T06:30:51,526 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:30:51,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19bf40f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:30:51,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c2cf0b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:30:51,646 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c790f33{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/java.io.tmpdir/jetty-localhost-35583-hadoop-hdfs-3_4_1-tests_jar-_-any-14111655195207358032/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T06:30:51,647 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@488dcf65{HTTP/1.1, (http/1.1)}{localhost:35583} 2024-11-10T06:30:51,647 INFO [Time-limited test {}] server.Server(415): Started @101348ms 2024-11-10T06:30:51,663 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T06:30:51,731 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:30:51,735 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:30:51,736 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:30:51,736 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:30:51,736 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T06:30:51,736 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@232ba44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:30:51,737 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@695feae9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:30:51,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2a90f125{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/java.io.tmpdir/jetty-localhost-45669-hadoop-hdfs-3_4_1-tests_jar-_-any-138467643557026237/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:30:51,854 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b8beccb{HTTP/1.1, (http/1.1)}{localhost:45669} 2024-11-10T06:30:51,854 INFO [Time-limited test {}] server.Server(415): Started @101555ms 2024-11-10T06:30:51,856 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T06:30:51,899 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:30:51,903 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:30:51,904 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:30:51,904 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:30:51,904 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T06:30:51,905 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74fcc9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:30:51,905 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d87dfcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:30:51,949 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/cluster_36a5731b-0275-4d73-7c1f-12ead882a745/data/data1/current/BP-505658187-172.17.0.2-1731220251466/current, will proceed with Du for space computation calculation, 2024-11-10T06:30:51,949 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/cluster_36a5731b-0275-4d73-7c1f-12ead882a745/data/data2/current/BP-505658187-172.17.0.2-1731220251466/current, will proceed with Du for space computation calculation, 2024-11-10T06:30:51,970 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T06:30:51,973 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3780a30f587e7072 with lease ID 0x921bbaa401a3c96b: Processing first storage report for DS-325ff1a2-eaef-418d-ac67-b7450e3046db from datanode DatanodeRegistration(127.0.0.1:41591, datanodeUuid=c044b893-beb2-4522-b571-7358606edc0c, infoPort=39171, infoSecurePort=0, ipcPort=45885, storageInfo=lv=-57;cid=testClusterID;nsid=1047606458;c=1731220251466) 2024-11-10T06:30:51,973 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3780a30f587e7072 with lease ID 0x921bbaa401a3c96b: from storage DS-325ff1a2-eaef-418d-ac67-b7450e3046db node DatanodeRegistration(127.0.0.1:41591, datanodeUuid=c044b893-beb2-4522-b571-7358606edc0c, infoPort=39171, infoSecurePort=0, ipcPort=45885, storageInfo=lv=-57;cid=testClusterID;nsid=1047606458;c=1731220251466), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:30:51,973 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3780a30f587e7072 with lease ID 0x921bbaa401a3c96b: Processing first storage report for DS-cc8a02de-109f-49ee-958f-573bb68caebf from datanode DatanodeRegistration(127.0.0.1:41591, datanodeUuid=c044b893-beb2-4522-b571-7358606edc0c, infoPort=39171, infoSecurePort=0, ipcPort=45885, storageInfo=lv=-57;cid=testClusterID;nsid=1047606458;c=1731220251466) 2024-11-10T06:30:51,973 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3780a30f587e7072 with lease ID 0x921bbaa401a3c96b: from storage DS-cc8a02de-109f-49ee-958f-573bb68caebf node DatanodeRegistration(127.0.0.1:41591, datanodeUuid=c044b893-beb2-4522-b571-7358606edc0c, infoPort=39171, infoSecurePort=0, ipcPort=45885, storageInfo=lv=-57;cid=testClusterID;nsid=1047606458;c=1731220251466), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:30:52,023 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@d071559{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/java.io.tmpdir/jetty-localhost-34217-hadoop-hdfs-3_4_1-tests_jar-_-any-16388779181559379768/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:30:52,024 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ece158b{HTTP/1.1, (http/1.1)}{localhost:34217} 2024-11-10T06:30:52,024 INFO [Time-limited test {}] server.Server(415): Started @101725ms 2024-11-10T06:30:52,026 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
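For context: the hbase.HBaseTestingUtil(751) entries above record each Hadoop/HDFS path property being pointed at the per-test data directory before the NameNode, the two DataNodes and their Jetty web UIs come up. Below is a minimal sketch of the same setup in isolation, assuming the public HBase test utility API (HBaseTestingUtil, getConfiguration(), startMiniDFSCluster(int), shutdownMiniDFSCluster()); method names may differ between HBase versions and are not taken from this log itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration();
        // Path properties (dfs.*, yarn.*, nfs.dump.dir, java.io.tmpdir, ...)
        // are rewritten to live under the utility's random test-data dir,
        // exactly as the "Setting ... in system properties and HBase conf"
        // lines above show.
        MiniDFSCluster dfs = util.startMiniDFSCluster(2); // two datanodes, as in this run
        System.out.println("mini HDFS at " + dfs.getURI());
        util.shutdownMiniDFSCluster();
      }
    }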
2024-11-10T06:30:52,119 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/cluster_36a5731b-0275-4d73-7c1f-12ead882a745/data/data3/current/BP-505658187-172.17.0.2-1731220251466/current, will proceed with Du for space computation calculation, 2024-11-10T06:30:52,119 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/cluster_36a5731b-0275-4d73-7c1f-12ead882a745/data/data4/current/BP-505658187-172.17.0.2-1731220251466/current, will proceed with Du for space computation calculation, 2024-11-10T06:30:52,145 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T06:30:52,148 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xef7f8ff13a81695b with lease ID 0x921bbaa401a3c96c: Processing first storage report for DS-40744519-5c65-4f2a-833b-fba2f290acba from datanode DatanodeRegistration(127.0.0.1:46721, datanodeUuid=08c1a089-8dbc-4fbe-b842-2264d2379583, infoPort=42003, infoSecurePort=0, ipcPort=36453, storageInfo=lv=-57;cid=testClusterID;nsid=1047606458;c=1731220251466) 2024-11-10T06:30:52,148 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xef7f8ff13a81695b with lease ID 0x921bbaa401a3c96c: from storage DS-40744519-5c65-4f2a-833b-fba2f290acba node DatanodeRegistration(127.0.0.1:46721, datanodeUuid=08c1a089-8dbc-4fbe-b842-2264d2379583, infoPort=42003, infoSecurePort=0, ipcPort=36453, storageInfo=lv=-57;cid=testClusterID;nsid=1047606458;c=1731220251466), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:30:52,148 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xef7f8ff13a81695b with lease ID 0x921bbaa401a3c96c: Processing first storage report for DS-f74f3e76-f933-4d57-8e4e-273408491b46 from datanode DatanodeRegistration(127.0.0.1:46721, datanodeUuid=08c1a089-8dbc-4fbe-b842-2264d2379583, infoPort=42003, infoSecurePort=0, ipcPort=36453, storageInfo=lv=-57;cid=testClusterID;nsid=1047606458;c=1731220251466) 2024-11-10T06:30:52,148 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xef7f8ff13a81695b with lease ID 0x921bbaa401a3c96c: from storage DS-f74f3e76-f933-4d57-8e4e-273408491b46 node DatanodeRegistration(127.0.0.1:46721, datanodeUuid=08c1a089-8dbc-4fbe-b842-2264d2379583, infoPort=42003, infoSecurePort=0, ipcPort=36453, storageInfo=lv=-57;cid=testClusterID;nsid=1047606458;c=1731220251466), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:30:52,155 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de 2024-11-10T06:30:52,158 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/cluster_36a5731b-0275-4d73-7c1f-12ead882a745/zookeeper_0, clientPort=51185, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/cluster_36a5731b-0275-4d73-7c1f-12ead882a745/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/cluster_36a5731b-0275-4d73-7c1f-12ead882a745/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T06:30:52,159 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51185 2024-11-10T06:30:52,160 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:52,162 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:52,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741825_1001 (size=7) 2024-11-10T06:30:52,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741825_1001 (size=7) 2024-11-10T06:30:52,174 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692 with version=8 2024-11-10T06:30:52,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/hbase-staging 2024-11-10T06:30:52,177 INFO [Time-limited test {}] client.ConnectionUtils(128): master/4999977c7e1b:0 server-side Connection retries=45 2024-11-10T06:30:52,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:30:52,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T06:30:52,177 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T06:30:52,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:30:52,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T06:30:52,177 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T06:30:52,177 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T06:30:52,178 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39153 2024-11-10T06:30:52,179 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39153 connecting to ZooKeeper ensemble=127.0.0.1:51185 2024-11-10T06:30:52,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:391530x0, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T06:30:52,187 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39153-0x10190e01b660000 connected 2024-11-10T06:30:52,202 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:52,204 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:52,207 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:30:52,207 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692, hbase.cluster.distributed=false 2024-11-10T06:30:52,209 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T06:30:52,209 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39153 2024-11-10T06:30:52,210 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39153 2024-11-10T06:30:52,210 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39153 2024-11-10T06:30:52,210 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39153 2024-11-10T06:30:52,210 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39153 2024-11-10T06:30:52,228 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4999977c7e1b:0 server-side Connection retries=45 2024-11-10T06:30:52,228 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:30:52,228 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T06:30:52,228 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T06:30:52,228 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:30:52,228 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T06:30:52,228 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T06:30:52,228 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T06:30:52,229 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34437 2024-11-10T06:30:52,230 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34437 connecting to ZooKeeper ensemble=127.0.0.1:51185 2024-11-10T06:30:52,231 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:52,233 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:52,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:344370x0, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T06:30:52,239 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34437-0x10190e01b660001 connected 2024-11-10T06:30:52,239 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:30:52,239 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T06:30:52,240 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T06:30:52,241 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T06:30:52,242 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T06:30:52,242 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34437 2024-11-10T06:30:52,244 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34437 2024-11-10T06:30:52,244 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34437 2024-11-10T06:30:52,246 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34437 2024-11-10T06:30:52,248 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34437 
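The RpcExecutor(188)/RpcExecutor(290) entries above show the master (port 39153) and the region server (port 34437) building their call queues with handlerCount=3 and maxQueueLength=30 before binding NettyRpcServer. The sketch below lists the standard configuration keys that size these queues; the exact keys and values this Jenkins job overrides are not visible in the log, so the numbers are assumptions chosen to mirror it.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcQueueSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Few handlers and a short call queue keep a mini-cluster test lightweight.
        conf.setInt("hbase.regionserver.handler.count", 3);
        conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
        // hbase.ipc.server.callqueue.read.ratio splits a call queue into read
        // and write handlers (0 disables the split); whether it produced the
        // exact writeHandlers=1/readHandlers=2 split above is an assumption.
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.66f);
        System.out.println(conf.getInt("hbase.regionserver.handler.count", -1));
      }
    }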
2024-11-10T06:30:52,261 DEBUG [M:0;4999977c7e1b:39153 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;4999977c7e1b:39153 2024-11-10T06:30:52,261 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/4999977c7e1b,39153,1731220252176 2024-11-10T06:30:52,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:30:52,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:30:52,264 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/4999977c7e1b,39153,1731220252176 2024-11-10T06:30:52,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T06:30:52,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:52,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:52,267 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T06:30:52,267 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/4999977c7e1b,39153,1731220252176 from backup master directory 2024-11-10T06:30:52,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:30:52,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/4999977c7e1b,39153,1731220252176 2024-11-10T06:30:52,270 WARN [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
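The ZKWatcher/ZKUtil entries above show both processes registering watchers on znodes such as /hbase/master and /hbase/running before those znodes exist, then reacting to the NodeCreated and NodeChildrenChanged events. The same one-shot-watch pattern with the plain ZooKeeper client is sketched below; the quorum port (51185) is per-run, and the short sleep is only there so the demo can observe an event.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51185", 30000,
            (WatchedEvent e) -> System.out.println("event " + e.getType() + " on " + e.getPath()));
        // exists() registers a one-shot watch even when the znode is absent,
        // which is how a region server learns when /hbase/master is created.
        zk.exists("/hbase/master", true);
        Thread.sleep(5_000); // give the watcher a chance to fire in this demo
        zk.close();
      }
    }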
2024-11-10T06:30:52,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:30:52,270 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=4999977c7e1b,39153,1731220252176 2024-11-10T06:30:52,275 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/hbase.id] with ID: 8424b2db-03bd-420a-bcaf-86fb29cb5ee4 2024-11-10T06:30:52,275 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/.tmp/hbase.id 2024-11-10T06:30:52,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741826_1002 (size=42) 2024-11-10T06:30:52,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741826_1002 (size=42) 2024-11-10T06:30:52,286 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/.tmp/hbase.id]:[hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/hbase.id] 2024-11-10T06:30:52,302 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:52,302 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T06:30:52,304 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
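The util.FSUtils(620/625/634) entries above show the cluster ID being written to .tmp/hbase.id first and only then moved to hbase.id, so readers never observe a half-written file. A sketch of that write-then-rename pattern with the Hadoop FileSystem API follows; the /tmp/demo paths are hypothetical, and only the UUID is taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRenameSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // uses fs.defaultFS (local FS if unset)
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/tmp/demo/.tmp/hbase.id"); // hypothetical locations
        Path dst = new Path("/tmp/demo/hbase.id");
        // 1. Write the complete content to a temporary file.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.writeUTF("8424b2db-03bd-420a-bcaf-86fb29cb5ee4");
        }
        // 2. Rename it into place; on HDFS the rename is a single metadata
        //    operation, so the target either does not exist or is complete.
        if (!fs.rename(tmp, dst)) {
          throw new IllegalStateException("rename failed");
        }
      }
    }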
2024-11-10T06:30:52,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:52,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:52,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741827_1003 (size=196) 2024-11-10T06:30:52,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741827_1003 (size=196) 2024-11-10T06:30:52,319 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T06:30:52,321 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T06:30:52,325 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:30:52,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741828_1004 (size=1189) 2024-11-10T06:30:52,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741828_1004 (size=1189) 2024-11-10T06:30:52,335 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store 2024-11-10T06:30:52,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741829_1005 (size=34) 2024-11-10T06:30:52,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741829_1005 (size=34) 2024-11-10T06:30:52,343 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:30:52,343 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T06:30:52,343 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:30:52,344 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:30:52,344 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T06:30:52,344 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:30:52,344 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
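The region.MasterRegion(370) and regionserver.HRegion(7590) entries above print the full descriptor of the local 'master:store' table: an in-memory 'info' family with 8 KB blocks, a ROWCOL bloom filter and ROW_INDEX_V1 encoding, plus 'proc', 'rs' and 'state' families with defaults. Below is a sketch of how such a family is declared with the public client API; the 'demo:store' table name is hypothetical and only one of the four families is shown.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family printed above: 3 versions, 8 KB blocks,
        // kept in memory, ROWCOL bloom filter, ROW_INDEX_V1 block encoding.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBlocksize(8 * 1024)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "store"))
            .setColumnFamily(info)
            .build();
        System.out.println(td);
      }
    }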
2024-11-10T06:30:52,344 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731220252343Disabling compacts and flushes for region at 1731220252343Disabling writes for close at 1731220252344 (+1 ms)Writing region close event to WAL at 1731220252344Closed at 1731220252344 2024-11-10T06:30:52,345 WARN [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/.initializing 2024-11-10T06:30:52,345 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/WALs/4999977c7e1b,39153,1731220252176 2024-11-10T06:30:52,349 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C39153%2C1731220252176, suffix=, logDir=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/WALs/4999977c7e1b,39153,1731220252176, archiveDir=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/oldWALs, maxLogs=10 2024-11-10T06:30:52,350 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C39153%2C1731220252176.1731220252349 2024-11-10T06:30:52,357 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/WALs/4999977c7e1b,39153,1731220252176/4999977c7e1b%2C39153%2C1731220252176.1731220252349 2024-11-10T06:30:52,358 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42003:42003),(127.0.0.1/127.0.0.1:39171:39171)] 2024-11-10T06:30:52,358 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:30:52,358 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:30:52,359 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:52,359 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:52,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:52,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T06:30:52,362 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:52,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:52,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:52,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T06:30:52,364 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:52,365 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:30:52,365 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:52,367 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T06:30:52,367 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:52,368 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:30:52,368 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:52,369 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T06:30:52,369 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:52,370 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:30:52,370 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:52,371 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:52,371 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:52,373 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:52,373 DEBUG [master/4999977c7e1b:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:52,374 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T06:30:52,375 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:52,377 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:30:52,378 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=736692, jitterRate=-0.06324809789657593}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T06:30:52,379 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731220252359Initializing all the Stores at 1731220252360 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220252360Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220252360Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220252360Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220252360Cleaning up temporary data from old regions at 1731220252373 (+13 ms)Region opened successfully at 1731220252379 (+6 ms) 2024-11-10T06:30:52,379 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T06:30:52,383 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79de13cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4999977c7e1b/172.17.0.2:0 2024-11-10T06:30:52,384 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T06:30:52,384 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T06:30:52,384 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T06:30:52,385 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T06:30:52,385 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-10T06:30:52,386 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-10T06:30:52,386 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T06:30:52,389 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T06:30:52,389 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T06:30:52,393 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T06:30:52,393 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T06:30:52,394 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T06:30:52,396 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T06:30:52,396 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T06:30:52,397 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T06:30:52,399 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T06:30:52,399 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T06:30:52,401 DEBUG 
[master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T06:30:52,403 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T06:30:52,405 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T06:30:52,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T06:30:52,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T06:30:52,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:52,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:52,407 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=4999977c7e1b,39153,1731220252176, sessionid=0x10190e01b660000, setting cluster-up flag (Was=false) 2024-11-10T06:30:52,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:52,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:52,416 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T06:30:52,418 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4999977c7e1b,39153,1731220252176 2024-11-10T06:30:52,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:52,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:52,429 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T06:30:52,430 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4999977c7e1b,39153,1731220252176 2024-11-10T06:30:52,432 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T06:30:52,434 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T06:30:52,434 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T06:30:52,434 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-10T06:30:52,434 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 4999977c7e1b,39153,1731220252176 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T06:30:52,436 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:30:52,436 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:30:52,436 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:30:52,436 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:30:52,436 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/4999977c7e1b:0, corePoolSize=10, maxPoolSize=10 2024-11-10T06:30:52,436 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:52,436 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/4999977c7e1b:0, corePoolSize=2, maxPoolSize=2 2024-11-10T06:30:52,436 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/4999977c7e1b:0, corePoolSize=1, 
maxPoolSize=1 2024-11-10T06:30:52,437 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731220282437 2024-11-10T06:30:52,437 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T06:30:52,437 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T06:30:52,437 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T06:30:52,437 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T06:30:52,437 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T06:30:52,437 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T06:30:52,438 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:52,438 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T06:30:52,438 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:30:52,438 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T06:30:52,438 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T06:30:52,438 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T06:30:52,438 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T06:30:52,438 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T06:30:52,439 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220252439,5,FailOnTimeoutGroup] 2024-11-10T06:30:52,439 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220252439,5,FailOnTimeoutGroup] 2024-11-10T06:30:52,439 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:52,439 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T06:30:52,439 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:52,439 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:52,439 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:52,439 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T06:30:52,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741831_1007 (size=1321) 2024-11-10T06:30:52,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741831_1007 (size=1321) 2024-11-10T06:30:52,448 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T06:30:52,449 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692 2024-11-10T06:30:52,450 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer(746): ClusterId : 8424b2db-03bd-420a-bcaf-86fb29cb5ee4 2024-11-10T06:30:52,450 DEBUG [RS:0;4999977c7e1b:34437 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T06:30:52,453 DEBUG [RS:0;4999977c7e1b:34437 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T06:30:52,453 DEBUG [RS:0;4999977c7e1b:34437 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T06:30:52,456 DEBUG [RS:0;4999977c7e1b:34437 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T06:30:52,456 DEBUG [RS:0;4999977c7e1b:34437 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f0a0b85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4999977c7e1b/172.17.0.2:0 2024-11-10T06:30:52,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741832_1008 (size=32) 2024-11-10T06:30:52,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741832_1008 (size=32) 2024-11-10T06:30:52,457 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:30:52,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T06:30:52,461 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T06:30:52,461 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:52,462 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:52,462 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T06:30:52,463 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T06:30:52,463 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:52,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:52,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T06:30:52,466 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T06:30:52,466 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:52,466 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:52,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T06:30:52,468 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T06:30:52,468 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:52,469 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:52,469 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T06:30:52,470 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/data/hbase/meta/1588230740 2024-11-10T06:30:52,470 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/data/hbase/meta/1588230740 2024-11-10T06:30:52,472 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T06:30:52,472 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T06:30:52,473 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T06:30:52,473 DEBUG [RS:0;4999977c7e1b:34437 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;4999977c7e1b:34437 2024-11-10T06:30:52,474 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T06:30:52,474 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T06:30:52,474 DEBUG [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer(832): About to register with Master. 
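[editor's note] The entries above include two tuning hints: reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value > 0, and FlushLargeStoresPolicy falls back to a derived bound when hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the table descriptor. A minimal, hedged Java sketch of supplying the first threshold programmatically is below; in practice it would normally live in hbase-site.xml, and the value 3 is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountConfigSketch {
    public static void main(String[] args) {
        // Start from the default HBase configuration (hbase-default.xml + hbase-site.xml).
        Configuration conf = HBaseConfiguration.create();

        // Property name taken verbatim from the log message above; the value 3 is an
        // illustrative assumption -- any value > 0 enables the reopen-on-high-refCount check.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);

        System.out.println("storeFileRefCount threshold = "
            + conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
    }
}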
2024-11-10T06:30:52,474 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T06:30:52,475 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer(2659): reportForDuty to master=4999977c7e1b,39153,1731220252176 with port=34437, startcode=1731220252227 2024-11-10T06:30:52,475 DEBUG [RS:0;4999977c7e1b:34437 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T06:30:52,478 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:30:52,479 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=725947, jitterRate=-0.07691113650798798}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T06:30:52,479 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48013, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T06:30:52,480 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39153 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4999977c7e1b,34437,1731220252227 2024-11-10T06:30:52,480 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39153 {}] master.ServerManager(517): Registering regionserver=4999977c7e1b,34437,1731220252227 2024-11-10T06:30:52,480 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731220252457Initializing all the Stores at 1731220252459 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220252459Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220252459Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220252459Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220252459Cleaning up temporary data from old regions at 1731220252472 (+13 ms)Region opened successfully at 1731220252480 (+8 ms) 2024-11-10T06:30:52,480 DEBUG [PEWorker-1 
{}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T06:30:52,480 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T06:30:52,480 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T06:30:52,480 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T06:30:52,480 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T06:30:52,481 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T06:30:52,481 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731220252480Disabling compacts and flushes for region at 1731220252480Disabling writes for close at 1731220252480Writing region close event to WAL at 1731220252481 (+1 ms)Closed at 1731220252481 2024-11-10T06:30:52,483 DEBUG [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692 2024-11-10T06:30:52,483 DEBUG [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44957 2024-11-10T06:30:52,483 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:30:52,483 DEBUG [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T06:30:52,483 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T06:30:52,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T06:30:52,485 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T06:30:52,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T06:30:52,486 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T06:30:52,486 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4999977c7e1b,34437,1731220252227] 2024-11-10T06:30:52,487 DEBUG [RS:0;4999977c7e1b:34437 {}] zookeeper.ZKUtil(111): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4999977c7e1b,34437,1731220252227 2024-11-10T06:30:52,487 WARN [RS:0;4999977c7e1b:34437 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-10T06:30:52,487 INFO [RS:0;4999977c7e1b:34437 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:30:52,487 DEBUG [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/WALs/4999977c7e1b,34437,1731220252227 2024-11-10T06:30:52,495 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T06:30:52,500 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T06:30:52,500 INFO [RS:0;4999977c7e1b:34437 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T06:30:52,500 INFO [RS:0;4999977c7e1b:34437 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:52,502 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T06:30:52,503 INFO [RS:0;4999977c7e1b:34437 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T06:30:52,503 INFO [RS:0;4999977c7e1b:34437 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:52,503 DEBUG [RS:0;4999977c7e1b:34437 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:52,503 DEBUG [RS:0;4999977c7e1b:34437 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:52,504 DEBUG [RS:0;4999977c7e1b:34437 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:52,504 DEBUG [RS:0;4999977c7e1b:34437 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:52,504 DEBUG [RS:0;4999977c7e1b:34437 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:52,504 DEBUG [RS:0;4999977c7e1b:34437 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4999977c7e1b:0, corePoolSize=2, maxPoolSize=2 2024-11-10T06:30:52,504 DEBUG [RS:0;4999977c7e1b:34437 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:52,504 DEBUG [RS:0;4999977c7e1b:34437 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:52,504 DEBUG [RS:0;4999977c7e1b:34437 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:52,504 DEBUG [RS:0;4999977c7e1b:34437 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:52,504 DEBUG [RS:0;4999977c7e1b:34437 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:52,504 DEBUG [RS:0;4999977c7e1b:34437 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:52,504 DEBUG [RS:0;4999977c7e1b:34437 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:30:52,504 DEBUG [RS:0;4999977c7e1b:34437 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:30:52,505 INFO [RS:0;4999977c7e1b:34437 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:52,505 INFO [RS:0;4999977c7e1b:34437 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:52,505 INFO [RS:0;4999977c7e1b:34437 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:52,505 INFO [RS:0;4999977c7e1b:34437 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:52,505 INFO [RS:0;4999977c7e1b:34437 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:52,505 INFO [RS:0;4999977c7e1b:34437 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,34437,1731220252227-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T06:30:52,521 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T06:30:52,522 INFO [RS:0;4999977c7e1b:34437 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,34437,1731220252227-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:52,522 INFO [RS:0;4999977c7e1b:34437 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:52,522 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.Replication(171): 4999977c7e1b,34437,1731220252227 started 2024-11-10T06:30:52,538 INFO [RS:0;4999977c7e1b:34437 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T06:30:52,538 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer(1482): Serving as 4999977c7e1b,34437,1731220252227, RpcServer on 4999977c7e1b/172.17.0.2:34437, sessionid=0x10190e01b660001 2024-11-10T06:30:52,538 DEBUG [RS:0;4999977c7e1b:34437 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T06:30:52,538 DEBUG [RS:0;4999977c7e1b:34437 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4999977c7e1b,34437,1731220252227 2024-11-10T06:30:52,538 DEBUG [RS:0;4999977c7e1b:34437 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,34437,1731220252227' 2024-11-10T06:30:52,538 DEBUG [RS:0;4999977c7e1b:34437 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T06:30:52,539 DEBUG [RS:0;4999977c7e1b:34437 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T06:30:52,540 DEBUG [RS:0;4999977c7e1b:34437 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T06:30:52,540 DEBUG [RS:0;4999977c7e1b:34437 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T06:30:52,540 DEBUG [RS:0;4999977c7e1b:34437 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4999977c7e1b,34437,1731220252227 2024-11-10T06:30:52,540 DEBUG [RS:0;4999977c7e1b:34437 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,34437,1731220252227' 2024-11-10T06:30:52,540 DEBUG [RS:0;4999977c7e1b:34437 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T06:30:52,540 DEBUG [RS:0;4999977c7e1b:34437 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T06:30:52,541 DEBUG [RS:0;4999977c7e1b:34437 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T06:30:52,541 INFO [RS:0;4999977c7e1b:34437 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T06:30:52,541 INFO [RS:0;4999977c7e1b:34437 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T06:30:52,637 WARN [4999977c7e1b:39153 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-10T06:30:52,644 INFO [RS:0;4999977c7e1b:34437 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C34437%2C1731220252227, suffix=, logDir=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/WALs/4999977c7e1b,34437,1731220252227, archiveDir=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/oldWALs, maxLogs=32 2024-11-10T06:30:52,646 INFO [RS:0;4999977c7e1b:34437 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C34437%2C1731220252227.1731220252645 2024-11-10T06:30:52,653 INFO [RS:0;4999977c7e1b:34437 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/WALs/4999977c7e1b,34437,1731220252227/4999977c7e1b%2C34437%2C1731220252227.1731220252645 2024-11-10T06:30:52,653 DEBUG [RS:0;4999977c7e1b:34437 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39171:39171),(127.0.0.1/127.0.0.1:42003:42003)] 2024-11-10T06:30:52,887 DEBUG [4999977c7e1b:39153 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-10T06:30:52,888 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=4999977c7e1b,34437,1731220252227 2024-11-10T06:30:52,890 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4999977c7e1b,34437,1731220252227, state=OPENING 2024-11-10T06:30:52,891 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T06:30:52,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:52,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:52,894 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T06:30:52,894 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:30:52,894 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:30:52,894 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=4999977c7e1b,34437,1731220252227}] 2024-11-10T06:30:53,048 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T06:30:53,050 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41411, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T06:30:53,055 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T06:30:53,055 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:30:53,057 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C34437%2C1731220252227.meta, suffix=.meta, logDir=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/WALs/4999977c7e1b,34437,1731220252227, archiveDir=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/oldWALs, maxLogs=32 2024-11-10T06:30:53,059 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C34437%2C1731220252227.meta.1731220253059.meta 2024-11-10T06:30:53,065 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/WALs/4999977c7e1b,34437,1731220252227/4999977c7e1b%2C34437%2C1731220252227.meta.1731220253059.meta 2024-11-10T06:30:53,066 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39171:39171),(127.0.0.1/127.0.0.1:42003:42003)] 2024-11-10T06:30:53,067 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:30:53,067 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T06:30:53,067 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T06:30:53,067 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-10T06:30:53,068 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T06:30:53,068 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:30:53,068 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T06:30:53,068 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T06:30:53,069 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T06:30:53,071 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T06:30:53,071 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:53,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:53,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T06:30:53,072 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T06:30:53,072 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:53,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:53,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T06:30:53,074 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T06:30:53,074 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:53,074 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:53,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T06:30:53,075 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T06:30:53,075 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:53,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
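[editor's note] The store-opening entries above repeat the column-family attributes of hbase:meta (ROW_INDEX_V1 block encoding, ROWCOL bloom filter, IN_MEMORY, 8 KB blocks, 3 versions). The hedged sketch below shows how an equivalent family could be declared through the public client API; "demo_table" is a hypothetical name, and this is not the internal code path the log itself is exercising.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
    public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the log: ROW_INDEX_V1 encoding,
        // ROWCOL bloom filter, in-memory, 8 KB block size, 3 versions.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setMaxVersions(3)
            .build();

        // "demo_table" is illustrative; hbase:meta itself is created internally by the master.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_table"))
            .setColumnFamily(info)
            .build();

        System.out.println(td);
    }
}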
2024-11-10T06:30:53,076 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T06:30:53,077 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/data/hbase/meta/1588230740 2024-11-10T06:30:53,078 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/data/hbase/meta/1588230740 2024-11-10T06:30:53,080 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T06:30:53,080 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T06:30:53,081 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T06:30:53,082 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T06:30:53,083 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=715295, jitterRate=-0.09045581519603729}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T06:30:53,083 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T06:30:53,084 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731220253068Writing region info on filesystem at 1731220253068Initializing all the Stores at 1731220253069 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220253069Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220253069Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220253069Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220253069Cleaning up temporary data from old regions at 1731220253080 (+11 ms)Running coprocessor post-open hooks at 1731220253083 (+3 ms)Region opened successfully at 1731220253084 (+1 ms) 2024-11-10T06:30:53,085 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731220253048 2024-11-10T06:30:53,088 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T06:30:53,088 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T06:30:53,089 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=4999977c7e1b,34437,1731220252227 2024-11-10T06:30:53,091 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4999977c7e1b,34437,1731220252227, state=OPEN 2024-11-10T06:30:53,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T06:30:53,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T06:30:53,094 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=4999977c7e1b,34437,1731220252227 2024-11-10T06:30:53,094 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:30:53,094 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:30:53,098 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T06:30:53,098 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=4999977c7e1b,34437,1731220252227 in 200 msec 2024-11-10T06:30:53,101 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T06:30:53,101 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 615 msec 2024-11-10T06:30:53,102 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:30:53,102 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T06:30:53,104 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T06:30:53,104 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4999977c7e1b,34437,1731220252227, seqNum=-1] 2024-11-10T06:30:53,104 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T06:30:53,106 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50709, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T06:30:53,112 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 678 msec 2024-11-10T06:30:53,113 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731220253112, completionTime=-1 2024-11-10T06:30:53,113 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-10T06:30:53,113 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-10T06:30:53,115 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-10T06:30:53,115 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731220313115 2024-11-10T06:30:53,115 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731220373115 2024-11-10T06:30:53,115 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-10T06:30:53,115 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,39153,1731220252176-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:53,115 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,39153,1731220252176-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:53,115 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,39153,1731220252176-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:53,115 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-4999977c7e1b:39153, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T06:30:53,115 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:53,116 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:53,117 DEBUG [master/4999977c7e1b:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T06:30:53,120 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.850sec 2024-11-10T06:30:53,120 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T06:30:53,120 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T06:30:53,120 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T06:30:53,120 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-10T06:30:53,120 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T06:30:53,121 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,39153,1731220252176-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T06:30:53,121 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,39153,1731220252176-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T06:30:53,123 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T06:30:53,123 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T06:30:53,123 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,39153,1731220252176-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T06:30:53,150 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6544715e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:30:53,150 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 4999977c7e1b,39153,-1 for getting cluster id 2024-11-10T06:30:53,150 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T06:30:53,152 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8424b2db-03bd-420a-bcaf-86fb29cb5ee4' 2024-11-10T06:30:53,153 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T06:30:53,153 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8424b2db-03bd-420a-bcaf-86fb29cb5ee4" 2024-11-10T06:30:53,154 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14f7577d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:30:53,154 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4999977c7e1b,39153,-1] 2024-11-10T06:30:53,154 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T06:30:53,154 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:30:53,156 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54020, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T06:30:53,157 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a2acd0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:30:53,157 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T06:30:53,158 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4999977c7e1b,34437,1731220252227, seqNum=-1] 2024-11-10T06:30:53,159 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T06:30:53,160 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54930, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T06:30:53,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=4999977c7e1b,39153,1731220252176 2024-11-10T06:30:53,163 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:53,166 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-10T06:30:53,166 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T06:30:53,166 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T06:30:53,166 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:30:53,166 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:30:53,167 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:30:53,167 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-10T06:30:53,167 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T06:30:53,167 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1771574582, stopped=false 2024-11-10T06:30:53,167 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=4999977c7e1b,39153,1731220252176 2024-11-10T06:30:53,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T06:30:53,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T06:30:53,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:53,169 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T06:30:53,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:53,169 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
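The call stack recorded just above shows the JUnit tearDown path for this run: AbstractTestLogRolling.tearDown() ends up in HBaseTestingUtil.shutdownMiniCluster(), which closes the shared async connection and then stops the cluster. What follows is only a minimal sketch of what such a tearDown typically looks like, assuming a conventional shared TEST_UTIL field and JUnit 4 @After wiring; it is not the test's actual source.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class MiniClusterTearDownSketch {
      // Assumed field name for illustration; these tests keep a shared HBaseTestingUtil instance.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the cluster connection and stops master, region server, DFS and ZooKeeper,
        // which is what produces the "Shutting down minicluster" through "Minicluster is down"
        // entries in this log.
        TEST_UTIL.shutdownMiniCluster();
      }
    }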
2024-11-10T06:30:53,169 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:30:53,169 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:30:53,170 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:30:53,170 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:30:53,170 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4999977c7e1b,34437,1731220252227' ***** 2024-11-10T06:30:53,170 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T06:30:53,170 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T06:30:53,170 INFO [RS:0;4999977c7e1b:34437 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T06:30:53,170 INFO [RS:0;4999977c7e1b:34437 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T06:30:53,170 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T06:30:53,170 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer(959): stopping server 4999977c7e1b,34437,1731220252227 2024-11-10T06:30:53,170 INFO [RS:0;4999977c7e1b:34437 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T06:30:53,171 INFO [RS:0;4999977c7e1b:34437 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;4999977c7e1b:34437. 2024-11-10T06:30:53,171 DEBUG [RS:0;4999977c7e1b:34437 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:30:53,171 DEBUG [RS:0;4999977c7e1b:34437 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:30:53,171 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-10T06:30:53,171 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T06:30:53,171 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T06:30:53,171 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T06:30:53,171 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T06:30:53,171 DEBUG [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-10T06:30:53,171 DEBUG [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-10T06:30:53,171 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T06:30:53,172 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T06:30:53,172 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T06:30:53,172 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T06:30:53,172 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T06:30:53,172 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-10T06:30:53,191 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/data/hbase/meta/1588230740/.tmp/ns/25937079d3d040b2bd7115c828c20ec0 is 43, key is default/ns:d/1731220253107/Put/seqid=0 2024-11-10T06:30:53,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741835_1011 (size=5153) 2024-11-10T06:30:53,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741835_1011 (size=5153) 2024-11-10T06:30:53,198 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/data/hbase/meta/1588230740/.tmp/ns/25937079d3d040b2bd7115c828c20ec0 2024-11-10T06:30:53,206 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/data/hbase/meta/1588230740/.tmp/ns/25937079d3d040b2bd7115c828c20ec0 as hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/data/hbase/meta/1588230740/ns/25937079d3d040b2bd7115c828c20ec0 2024-11-10T06:30:53,213 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/data/hbase/meta/1588230740/ns/25937079d3d040b2bd7115c828c20ec0, entries=2, sequenceid=6, filesize=5.0 K 2024-11-10T06:30:53,215 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false 2024-11-10T06:30:53,215 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-10T06:30:53,220 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-10T06:30:53,221 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T06:30:53,221 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T06:30:53,221 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731220253171Running coprocessor pre-close hooks at 1731220253171Disabling compacts and flushes for region at 1731220253171Disabling writes for close at 1731220253172 (+1 ms)Obtaining lock to block concurrent updates at 1731220253172Preparing flush snapshotting stores in 1588230740 at 1731220253172Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731220253173 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731220253174 (+1 ms)Flushing 1588230740/ns: creating writer at 1731220253174Flushing 1588230740/ns: appending metadata at 1731220253191 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731220253191Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28553fb6: reopening flushed file at 1731220253205 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false at 1731220253215 (+10 ms)Writing region close event to WAL at 1731220253216 (+1 ms)Running coprocessor post-close hooks at 1731220253221 (+5 ms)Closed at 1731220253221 2024-11-10T06:30:53,221 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T06:30:53,372 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer(976): stopping server 4999977c7e1b,34437,1731220252227; all regions closed. 
2024-11-10T06:30:53,372 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:53,372 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:53,372 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:53,373 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:53,373 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:53,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741834_1010 (size=1152) 2024-11-10T06:30:53,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741834_1010 (size=1152) 2024-11-10T06:30:53,378 DEBUG [RS:0;4999977c7e1b:34437 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/oldWALs 2024-11-10T06:30:53,378 INFO [RS:0;4999977c7e1b:34437 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4999977c7e1b%2C34437%2C1731220252227.meta:.meta(num 1731220253059) 2024-11-10T06:30:53,379 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:53,379 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:53,379 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:53,379 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:53,379 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:53,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741833_1009 (size=93) 2024-11-10T06:30:53,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741833_1009 (size=93) 2024-11-10T06:30:53,384 DEBUG [RS:0;4999977c7e1b:34437 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/oldWALs 2024-11-10T06:30:53,384 INFO [RS:0;4999977c7e1b:34437 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4999977c7e1b%2C34437%2C1731220252227:(num 1731220252645) 2024-11-10T06:30:53,384 DEBUG [RS:0;4999977c7e1b:34437 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:30:53,384 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T06:30:53,384 INFO [RS:0;4999977c7e1b:34437 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T06:30:53,384 INFO [RS:0;4999977c7e1b:34437 {}] hbase.ChoreService(370): Chore service for: regionserver/4999977c7e1b:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-10T06:30:53,384 INFO [RS:0;4999977c7e1b:34437 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T06:30:53,384 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T06:30:53,385 INFO [RS:0;4999977c7e1b:34437 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34437 2024-11-10T06:30:53,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4999977c7e1b,34437,1731220252227 2024-11-10T06:30:53,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T06:30:53,387 INFO [RS:0;4999977c7e1b:34437 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T06:30:53,388 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4999977c7e1b,34437,1731220252227] 2024-11-10T06:30:53,390 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4999977c7e1b,34437,1731220252227 already deleted, retry=false 2024-11-10T06:30:53,390 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4999977c7e1b,34437,1731220252227 expired; onlineServers=0 2024-11-10T06:30:53,390 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '4999977c7e1b,39153,1731220252176' ***** 2024-11-10T06:30:53,390 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T06:30:53,390 INFO [M:0;4999977c7e1b:39153 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T06:30:53,390 INFO [M:0;4999977c7e1b:39153 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T06:30:53,391 DEBUG [M:0;4999977c7e1b:39153 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T06:30:53,391 DEBUG [M:0;4999977c7e1b:39153 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T06:30:53,391 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-10T06:30:53,391 DEBUG [master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220252439 {}] cleaner.HFileCleaner(306): Exit Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220252439,5,FailOnTimeoutGroup] 2024-11-10T06:30:53,391 DEBUG [master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220252439 {}] cleaner.HFileCleaner(306): Exit Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220252439,5,FailOnTimeoutGroup] 2024-11-10T06:30:53,391 INFO [M:0;4999977c7e1b:39153 {}] hbase.ChoreService(370): Chore service for: master/4999977c7e1b:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T06:30:53,391 INFO [M:0;4999977c7e1b:39153 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T06:30:53,391 DEBUG [M:0;4999977c7e1b:39153 {}] master.HMaster(1795): Stopping service threads 2024-11-10T06:30:53,391 INFO [M:0;4999977c7e1b:39153 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T06:30:53,391 INFO [M:0;4999977c7e1b:39153 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T06:30:53,391 INFO [M:0;4999977c7e1b:39153 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T06:30:53,392 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T06:30:53,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T06:30:53,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:53,393 DEBUG [M:0;4999977c7e1b:39153 {}] zookeeper.ZKUtil(347): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T06:30:53,393 WARN [M:0;4999977c7e1b:39153 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T06:30:53,393 INFO [M:0;4999977c7e1b:39153 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/.lastflushedseqids 2024-11-10T06:30:53,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741836_1012 (size=99) 2024-11-10T06:30:53,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741836_1012 (size=99) 2024-11-10T06:30:53,403 INFO [M:0;4999977c7e1b:39153 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T06:30:53,403 INFO [M:0;4999977c7e1b:39153 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T06:30:53,404 DEBUG [M:0;4999977c7e1b:39153 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T06:30:53,404 INFO [M:0;4999977c7e1b:39153 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:30:53,404 DEBUG [M:0;4999977c7e1b:39153 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:30:53,404 DEBUG [M:0;4999977c7e1b:39153 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T06:30:53,404 DEBUG [M:0;4999977c7e1b:39153 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:30:53,405 INFO [M:0;4999977c7e1b:39153 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-10T06:30:53,423 DEBUG [M:0;4999977c7e1b:39153 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c95195fa5c4b4f5f8b3aed6a11e0e387 is 82, key is hbase:meta,,1/info:regioninfo/1731220253089/Put/seqid=0 2024-11-10T06:30:53,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741837_1013 (size=5672) 2024-11-10T06:30:53,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741837_1013 (size=5672) 2024-11-10T06:30:53,431 INFO [M:0;4999977c7e1b:39153 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c95195fa5c4b4f5f8b3aed6a11e0e387 2024-11-10T06:30:53,454 DEBUG [M:0;4999977c7e1b:39153 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/10a203a4d24f4b5d8900e05c5a9ee9be is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731220253112/Put/seqid=0 2024-11-10T06:30:53,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741838_1014 (size=5275) 2024-11-10T06:30:53,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741838_1014 (size=5275) 2024-11-10T06:30:53,460 INFO [M:0;4999977c7e1b:39153 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/10a203a4d24f4b5d8900e05c5a9ee9be 2024-11-10T06:30:53,482 DEBUG [M:0;4999977c7e1b:39153 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/68fc2c24fcd34e40b82717f47363b958 is 69, key is 4999977c7e1b,34437,1731220252227/rs:state/1731220252480/Put/seqid=0 2024-11-10T06:30:53,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741839_1015 (size=5156) 2024-11-10T06:30:53,489 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:30:53,489 INFO [RS:0;4999977c7e1b:34437 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T06:30:53,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34437-0x10190e01b660001, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:30:53,489 INFO [RS:0;4999977c7e1b:34437 {}] regionserver.HRegionServer(1031): Exiting; stopping=4999977c7e1b,34437,1731220252227; zookeeper connection closed. 2024-11-10T06:30:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741839_1015 (size=5156) 2024-11-10T06:30:53,489 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1ca141 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1ca141 2024-11-10T06:30:53,489 INFO [M:0;4999977c7e1b:39153 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/68fc2c24fcd34e40b82717f47363b958 2024-11-10T06:30:53,490 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-10T06:30:53,513 DEBUG [M:0;4999977c7e1b:39153 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/caf3b42aa743416db2d6f151ad902861 is 52, key is load_balancer_on/state:d/1731220253165/Put/seqid=0 2024-11-10T06:30:53,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741840_1016 (size=5056) 2024-11-10T06:30:53,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741840_1016 (size=5056) 2024-11-10T06:30:53,520 INFO [M:0;4999977c7e1b:39153 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/caf3b42aa743416db2d6f151ad902861 2024-11-10T06:30:53,527 DEBUG [M:0;4999977c7e1b:39153 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c95195fa5c4b4f5f8b3aed6a11e0e387 as hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c95195fa5c4b4f5f8b3aed6a11e0e387 2024-11-10T06:30:53,534 INFO [M:0;4999977c7e1b:39153 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c95195fa5c4b4f5f8b3aed6a11e0e387, entries=8, sequenceid=29, filesize=5.5 K 2024-11-10T06:30:53,535 DEBUG [M:0;4999977c7e1b:39153 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/10a203a4d24f4b5d8900e05c5a9ee9be as hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/10a203a4d24f4b5d8900e05c5a9ee9be 2024-11-10T06:30:53,541 INFO [M:0;4999977c7e1b:39153 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/10a203a4d24f4b5d8900e05c5a9ee9be, entries=3, sequenceid=29, filesize=5.2 K 2024-11-10T06:30:53,543 DEBUG [M:0;4999977c7e1b:39153 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/68fc2c24fcd34e40b82717f47363b958 as hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/68fc2c24fcd34e40b82717f47363b958 2024-11-10T06:30:53,550 INFO [M:0;4999977c7e1b:39153 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/68fc2c24fcd34e40b82717f47363b958, entries=1, sequenceid=29, filesize=5.0 K 2024-11-10T06:30:53,551 DEBUG [M:0;4999977c7e1b:39153 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/caf3b42aa743416db2d6f151ad902861 as hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/caf3b42aa743416db2d6f151ad902861 2024-11-10T06:30:53,559 INFO [M:0;4999977c7e1b:39153 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44957/user/jenkins/test-data/913fa853-1448-40c1-4edc-b30a6ca1d692/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/caf3b42aa743416db2d6f151ad902861, entries=1, sequenceid=29, filesize=4.9 K 2024-11-10T06:30:53,560 INFO [M:0;4999977c7e1b:39153 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=29, compaction requested=false 2024-11-10T06:30:53,562 INFO [M:0;4999977c7e1b:39153 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:30:53,563 DEBUG [M:0;4999977c7e1b:39153 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731220253404Disabling compacts and flushes for region at 1731220253404Disabling writes for close at 1731220253404Obtaining lock to block concurrent updates at 1731220253405 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731220253405Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731220253405Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731220253406 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731220253406Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731220253423 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731220253423Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731220253437 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731220253453 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731220253453Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731220253465 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731220253481 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731220253481Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731220253496 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731220253512 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731220253512Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1610fc31: reopening flushed file at 1731220253526 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2941c1: reopening flushed file at 1731220253534 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b6cba39: reopening flushed file at 1731220253542 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e698670: reopening flushed file at 1731220253550 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=29, compaction requested=false at 1731220253561 (+11 ms)Writing region close event to WAL at 1731220253562 (+1 ms)Closed at 1731220253562 2024-11-10T06:30:53,563 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:53,563 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:53,563 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:53,564 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:53,564 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:30:53,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41591 is added to blk_1073741830_1006 (size=10311) 2024-11-10T06:30:53,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46721 is added to blk_1073741830_1006 (size=10311) 2024-11-10T06:30:53,567 INFO [M:0;4999977c7e1b:39153 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-10T06:30:53,567 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T06:30:53,567 INFO [M:0;4999977c7e1b:39153 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39153 2024-11-10T06:30:53,568 INFO [M:0;4999977c7e1b:39153 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T06:30:53,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:30:53,670 INFO [M:0;4999977c7e1b:39153 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T06:30:53,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39153-0x10190e01b660000, quorum=127.0.0.1:51185, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:30:53,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@d071559{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:30:53,673 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ece158b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:30:53,673 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:30:53,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d87dfcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:30:53,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74fcc9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/hadoop.log.dir/,STOPPED} 2024-11-10T06:30:53,675 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T06:30:53,675 WARN [BP-505658187-172.17.0.2-1731220251466 heartbeating to localhost/127.0.0.1:44957 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:30:53,675 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:30:53,675 WARN [BP-505658187-172.17.0.2-1731220251466 heartbeating to localhost/127.0.0.1:44957 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-505658187-172.17.0.2-1731220251466 (Datanode Uuid 08c1a089-8dbc-4fbe-b842-2264d2379583) service to localhost/127.0.0.1:44957 2024-11-10T06:30:53,676 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/cluster_36a5731b-0275-4d73-7c1f-12ead882a745/data/data3/current/BP-505658187-172.17.0.2-1731220251466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:30:53,676 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/cluster_36a5731b-0275-4d73-7c1f-12ead882a745/data/data4/current/BP-505658187-172.17.0.2-1731220251466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:30:53,676 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:30:53,678 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2a90f125{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:30:53,678 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b8beccb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:30:53,678 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:30:53,679 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@695feae9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:30:53,679 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@232ba44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/hadoop.log.dir/,STOPPED} 2024-11-10T06:30:53,680 WARN [BP-505658187-172.17.0.2-1731220251466 heartbeating to localhost/127.0.0.1:44957 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:30:53,680 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T06:30:53,680 WARN [BP-505658187-172.17.0.2-1731220251466 heartbeating to localhost/127.0.0.1:44957 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-505658187-172.17.0.2-1731220251466 (Datanode Uuid c044b893-beb2-4522-b571-7358606edc0c) service to localhost/127.0.0.1:44957 2024-11-10T06:30:53,680 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:30:53,681 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/cluster_36a5731b-0275-4d73-7c1f-12ead882a745/data/data1/current/BP-505658187-172.17.0.2-1731220251466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:30:53,681 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/cluster_36a5731b-0275-4d73-7c1f-12ead882a745/data/data2/current/BP-505658187-172.17.0.2-1731220251466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:30:53,681 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:30:53,687 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c790f33{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T06:30:53,688 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@488dcf65{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:30:53,688 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:30:53,688 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c2cf0b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:30:53,688 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19bf40f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/hadoop.log.dir/,STOPPED} 2024-11-10T06:30:53,695 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T06:30:53,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T06:30:53,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T06:30:53,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/hadoop.log.dir so I do NOT create it in target/test-data/9785572f-cfab-81d7-23d2-631797d24c96 2024-11-10T06:30:53,711 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/28fac381-4ba2-206b-9328-99f79a5a86de/hadoop.tmp.dir so I do NOT create it in target/test-data/9785572f-cfab-81d7-23d2-631797d24c96 2024-11-10T06:30:53,711 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993, deleteOnExit=true 2024-11-10T06:30:53,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-10T06:30:53,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/test.cache.data in system properties and HBase conf 2024-11-10T06:30:53,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T06:30:53,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir in system properties and HBase conf 2024-11-10T06:30:53,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T06:30:53,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T06:30:53,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T06:30:53,712 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-10T06:30:53,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T06:30:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T06:30:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T06:30:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T06:30:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T06:30:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T06:30:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T06:30:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T06:30:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T06:30:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/nfs.dump.dir in system properties and HBase conf 2024-11-10T06:30:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/java.io.tmpdir in system properties and HBase conf 2024-11-10T06:30:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T06:30:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T06:30:53,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T06:30:53,728 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T06:30:53,805 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:30:53,811 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:30:53,812 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:30:53,812 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:30:53,812 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T06:30:53,813 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:30:53,813 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3789f604{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:30:53,814 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7447e09f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:30:53,929 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@618abfb4{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/java.io.tmpdir/jetty-localhost-37171-hadoop-hdfs-3_4_1-tests_jar-_-any-14113683855027470276/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T06:30:53,930 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1585275{HTTP/1.1, (http/1.1)}{localhost:37171} 2024-11-10T06:30:53,930 INFO [Time-limited test {}] server.Server(415): Started @103631ms 2024-11-10T06:30:53,945 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T06:30:54,012 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:30:54,016 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:30:54,017 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:30:54,017 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:30:54,017 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T06:30:54,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@778fdefb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:30:54,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6cb8e75e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:30:54,134 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24d45158{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/java.io.tmpdir/jetty-localhost-46807-hadoop-hdfs-3_4_1-tests_jar-_-any-598283761058107549/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:30:54,135 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@76afaf39{HTTP/1.1, (http/1.1)}{localhost:46807} 2024-11-10T06:30:54,135 INFO [Time-limited test {}] server.Server(415): Started @103836ms 2024-11-10T06:30:54,137 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T06:30:54,172 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:30:54,175 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:30:54,176 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:30:54,176 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:30:54,176 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T06:30:54,177 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30985369{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:30:54,177 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c9fa828{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:30:54,243 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data1/current/BP-741755570-172.17.0.2-1731220253746/current, will proceed with Du for space computation calculation, 2024-11-10T06:30:54,243 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data2/current/BP-741755570-172.17.0.2-1731220253746/current, will proceed with Du for space computation calculation, 2024-11-10T06:30:54,263 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T06:30:54,266 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe66b977144d9f8c6 with lease ID 0x9474d1e025b216bf: Processing first storage report for DS-724eaefd-57fa-446e-bf10-0c6346e1115f from datanode DatanodeRegistration(127.0.0.1:38361, datanodeUuid=c03a6f8f-b159-46ce-9bff-c535fb87cf89, infoPort=33573, infoSecurePort=0, ipcPort=39113, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746) 2024-11-10T06:30:54,266 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe66b977144d9f8c6 with lease ID 0x9474d1e025b216bf: from storage DS-724eaefd-57fa-446e-bf10-0c6346e1115f node DatanodeRegistration(127.0.0.1:38361, datanodeUuid=c03a6f8f-b159-46ce-9bff-c535fb87cf89, infoPort=33573, infoSecurePort=0, ipcPort=39113, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:30:54,266 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe66b977144d9f8c6 with lease ID 0x9474d1e025b216bf: Processing first storage report for DS-73f38801-50f7-4d8b-93a1-a0f0bf51f76f from datanode DatanodeRegistration(127.0.0.1:38361, datanodeUuid=c03a6f8f-b159-46ce-9bff-c535fb87cf89, infoPort=33573, infoSecurePort=0, ipcPort=39113, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746) 2024-11-10T06:30:54,266 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe66b977144d9f8c6 with lease ID 0x9474d1e025b216bf: from storage DS-73f38801-50f7-4d8b-93a1-a0f0bf51f76f node DatanodeRegistration(127.0.0.1:38361, datanodeUuid=c03a6f8f-b159-46ce-9bff-c535fb87cf89, infoPort=33573, infoSecurePort=0, ipcPort=39113, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:30:54,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3893f69{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/java.io.tmpdir/jetty-localhost-33919-hadoop-hdfs-3_4_1-tests_jar-_-any-7461261346142273512/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:30:54,296 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@206fa4ee{HTTP/1.1, (http/1.1)}{localhost:33919} 2024-11-10T06:30:54,296 INFO [Time-limited test {}] server.Server(415): Started @103996ms 2024-11-10T06:30:54,297 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-10T06:30:54,395 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data4/current/BP-741755570-172.17.0.2-1731220253746/current, will proceed with Du for space computation calculation, 2024-11-10T06:30:54,395 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data3/current/BP-741755570-172.17.0.2-1731220253746/current, will proceed with Du for space computation calculation, 2024-11-10T06:30:54,413 WARN [Thread-672 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T06:30:54,416 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1002bbe320ad091 with lease ID 0x9474d1e025b216c0: Processing first storage report for DS-54e98695-2016-467c-93dc-5fe5d14e27b8 from datanode DatanodeRegistration(127.0.0.1:37021, datanodeUuid=e19f1e86-46e6-4f0d-b75d-d7b0b9a2c8c8, infoPort=41397, infoSecurePort=0, ipcPort=39687, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746) 2024-11-10T06:30:54,416 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1002bbe320ad091 with lease ID 0x9474d1e025b216c0: from storage DS-54e98695-2016-467c-93dc-5fe5d14e27b8 node DatanodeRegistration(127.0.0.1:37021, datanodeUuid=e19f1e86-46e6-4f0d-b75d-d7b0b9a2c8c8, infoPort=41397, infoSecurePort=0, ipcPort=39687, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:30:54,416 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1002bbe320ad091 with lease ID 0x9474d1e025b216c0: Processing first storage report for DS-914d8c7b-c2fb-4ad6-9260-57e7c6dc8b45 from datanode DatanodeRegistration(127.0.0.1:37021, datanodeUuid=e19f1e86-46e6-4f0d-b75d-d7b0b9a2c8c8, infoPort=41397, infoSecurePort=0, ipcPort=39687, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746) 2024-11-10T06:30:54,416 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1002bbe320ad091 with lease ID 0x9474d1e025b216c0: from storage DS-914d8c7b-c2fb-4ad6-9260-57e7c6dc8b45 node DatanodeRegistration(127.0.0.1:37021, datanodeUuid=e19f1e86-46e6-4f0d-b75d-d7b0b9a2c8c8, infoPort=41397, infoSecurePort=0, ipcPort=39687, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:30:54,424 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96 2024-11-10T06:30:54,427 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/zookeeper_0, clientPort=62359, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T06:30:54,428 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62359 2024-11-10T06:30:54,428 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:54,430 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:54,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37021 is added to blk_1073741825_1001 (size=7) 2024-11-10T06:30:54,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741825_1001 (size=7) 2024-11-10T06:30:54,442 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf with version=8 2024-11-10T06:30:54,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/hbase-staging 2024-11-10T06:30:54,444 INFO [Time-limited test {}] client.ConnectionUtils(128): master/4999977c7e1b:0 server-side Connection retries=45 2024-11-10T06:30:54,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:30:54,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T06:30:54,444 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T06:30:54,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:30:54,445 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T06:30:54,445 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T06:30:54,445 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T06:30:54,445 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39613 2024-11-10T06:30:54,447 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39613 connecting to ZooKeeper ensemble=127.0.0.1:62359 2024-11-10T06:30:54,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:396130x0, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T06:30:54,454 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39613-0x10190e024420000 connected 2024-11-10T06:30:54,473 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:54,475 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:54,477 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:30:54,478 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf, hbase.cluster.distributed=false 2024-11-10T06:30:54,479 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T06:30:54,480 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39613 2024-11-10T06:30:54,480 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39613 2024-11-10T06:30:54,482 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39613 2024-11-10T06:30:54,483 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39613 2024-11-10T06:30:54,483 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39613 2024-11-10T06:30:54,500 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4999977c7e1b:0 server-side Connection retries=45 2024-11-10T06:30:54,500 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:30:54,500 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T06:30:54,500 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T06:30:54,500 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:30:54,500 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T06:30:54,500 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T06:30:54,500 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T06:30:54,501 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45891 2024-11-10T06:30:54,502 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45891 connecting to ZooKeeper ensemble=127.0.0.1:62359 2024-11-10T06:30:54,503 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:54,505 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:54,506 INFO [regionserver/4999977c7e1b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T06:30:54,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:458910x0, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T06:30:54,511 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:458910x0, quorum=127.0.0.1:62359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:30:54,511 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45891-0x10190e024420001 connected 2024-11-10T06:30:54,511 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T06:30:54,512 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T06:30:54,512 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T06:30:54,513 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T06:30:54,515 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45891 2024-11-10T06:30:54,515 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45891 2024-11-10T06:30:54,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45891 2024-11-10T06:30:54,517 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45891 2024-11-10T06:30:54,517 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45891 2024-11-10T06:30:54,532 DEBUG [M:0;4999977c7e1b:39613 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;4999977c7e1b:39613 2024-11-10T06:30:54,532 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/4999977c7e1b,39613,1731220254444 2024-11-10T06:30:54,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:30:54,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:30:54,535 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/4999977c7e1b,39613,1731220254444 2024-11-10T06:30:54,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T06:30:54,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:54,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:54,537 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T06:30:54,538 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/4999977c7e1b,39613,1731220254444 from backup master directory 2024-11-10T06:30:54,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:30:54,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/4999977c7e1b,39613,1731220254444 2024-11-10T06:30:54,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:30:54,539 WARN [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-10T06:30:54,540 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=4999977c7e1b,39613,1731220254444 2024-11-10T06:30:54,545 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/hbase.id] with ID: df0a8ce8-8736-4461-b62c-e36815b2dd42 2024-11-10T06:30:54,545 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/.tmp/hbase.id 2024-11-10T06:30:54,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741826_1002 (size=42) 2024-11-10T06:30:54,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37021 is added to blk_1073741826_1002 (size=42) 2024-11-10T06:30:54,553 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/.tmp/hbase.id]:[hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/hbase.id] 2024-11-10T06:30:54,566 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:54,566 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T06:30:54,568 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-10T06:30:54,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:54,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:54,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741827_1003 (size=196) 2024-11-10T06:30:54,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37021 is added to blk_1073741827_1003 (size=196) 2024-11-10T06:30:54,578 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T06:30:54,579 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T06:30:54,579 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:30:54,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37021 is added to blk_1073741828_1004 (size=1189) 2024-11-10T06:30:54,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741828_1004 (size=1189) 2024-11-10T06:30:54,589 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store 2024-11-10T06:30:54,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37021 is added to blk_1073741829_1005 (size=34) 2024-11-10T06:30:54,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741829_1005 (size=34) 2024-11-10T06:30:54,597 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:30:54,597 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T06:30:54,597 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:30:54,597 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:30:54,597 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T06:30:54,597 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:30:54,597 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T06:30:54,597 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731220254597Disabling compacts and flushes for region at 1731220254597Disabling writes for close at 1731220254597Writing region close event to WAL at 1731220254597Closed at 1731220254597 2024-11-10T06:30:54,598 WARN [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/.initializing 2024-11-10T06:30:54,598 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/WALs/4999977c7e1b,39613,1731220254444 2024-11-10T06:30:54,601 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C39613%2C1731220254444, suffix=, logDir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/WALs/4999977c7e1b,39613,1731220254444, archiveDir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/oldWALs, maxLogs=10 2024-11-10T06:30:54,601 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C39613%2C1731220254444.1731220254601 2024-11-10T06:30:54,606 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/WALs/4999977c7e1b,39613,1731220254444/4999977c7e1b%2C39613%2C1731220254444.1731220254601 2024-11-10T06:30:54,608 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41397:41397),(127.0.0.1/127.0.0.1:33573:33573)] 2024-11-10T06:30:54,609 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:30:54,609 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:30:54,609 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:54,609 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:54,610 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:54,612 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T06:30:54,612 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:54,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:54,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:54,614 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T06:30:54,614 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:54,615 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:30:54,615 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:54,616 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T06:30:54,616 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:54,616 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:30:54,617 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:54,618 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T06:30:54,618 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:54,618 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:30:54,618 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:54,619 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:54,619 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:54,621 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:54,621 DEBUG [master/4999977c7e1b:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:54,621 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T06:30:54,623 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:30:54,626 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:30:54,627 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727741, jitterRate=-0.07462970912456512}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T06:30:54,628 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731220254609Initializing all the Stores at 1731220254610 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220254610Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220254610Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220254610Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220254610Cleaning up temporary data from old regions at 1731220254621 (+11 ms)Region opened successfully at 1731220254628 (+7 ms) 2024-11-10T06:30:54,628 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T06:30:54,632 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dbaa9e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4999977c7e1b/172.17.0.2:0 2024-11-10T06:30:54,633 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T06:30:54,633 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T06:30:54,633 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T06:30:54,633 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T06:30:54,634 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-10T06:30:54,634 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-10T06:30:54,634 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T06:30:54,637 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T06:30:54,637 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T06:30:54,639 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T06:30:54,640 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T06:30:54,640 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T06:30:54,642 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T06:30:54,642 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T06:30:54,643 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T06:30:54,645 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T06:30:54,645 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T06:30:54,647 DEBUG 
[master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T06:30:54,649 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T06:30:54,652 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T06:30:54,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T06:30:54,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T06:30:54,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:54,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:54,654 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=4999977c7e1b,39613,1731220254444, sessionid=0x10190e024420000, setting cluster-up flag (Was=false) 2024-11-10T06:30:54,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:54,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:54,663 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T06:30:54,664 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4999977c7e1b,39613,1731220254444 2024-11-10T06:30:54,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:54,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:54,674 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T06:30:54,675 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4999977c7e1b,39613,1731220254444 2024-11-10T06:30:54,677 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T06:30:54,679 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T06:30:54,679 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T06:30:54,679 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-10T06:30:54,680 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 4999977c7e1b,39613,1731220254444 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T06:30:54,681 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:30:54,681 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:30:54,681 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:30:54,681 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:30:54,681 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/4999977c7e1b:0, corePoolSize=10, maxPoolSize=10 2024-11-10T06:30:54,681 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:54,681 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/4999977c7e1b:0, corePoolSize=2, maxPoolSize=2 2024-11-10T06:30:54,681 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/4999977c7e1b:0, corePoolSize=1, 
maxPoolSize=1 2024-11-10T06:30:54,682 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731220284682 2024-11-10T06:30:54,682 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T06:30:54,682 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T06:30:54,682 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T06:30:54,682 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T06:30:54,682 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T06:30:54,682 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T06:30:54,683 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:54,683 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T06:30:54,683 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T06:30:54,683 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T06:30:54,684 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T06:30:54,684 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T06:30:54,684 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:30:54,684 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T06:30:54,684 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220254684,5,FailOnTimeoutGroup] 2024-11-10T06:30:54,685 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:54,685 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T06:30:54,687 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220254684,5,FailOnTimeoutGroup] 2024-11-10T06:30:54,687 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:54,687 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T06:30:54,687 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:54,687 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
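The column-family attributes printed for hbase:meta above (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE, IN_MEMORY) correspond to settings exposed by the public HBase client builders. A minimal sketch of declaring a family with the same attributes, assuming the 2.x+ client API; the table name "exampletable" is a placeholder, not a table from this run:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class MetaLikeDescriptorSketch {
    public static TableDescriptor build() {
      // Mirrors the 'info' family attributes logged by FSTableDescriptors above:
      // VERSIONS=3, IN_MEMORY=true, BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192
      ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("info"))
          .setMaxVersions(3)
          .setInMemory(true)
          .setBloomFilterType(BloomType.ROWCOL)
          .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
          .setBlocksize(8192)
          .build();
      // "exampletable" is a stand-in name for illustration only.
      return TableDescriptorBuilder.newBuilder(TableName.valueOf("exampletable"))
          .setColumnFamily(info)
          .build();
    }
  }

A descriptor built this way is the kind of object FSTableDescriptors serializes into the .tabledesc/.tableinfo file that the log reports writing next.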
2024-11-10T06:30:54,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37021 is added to blk_1073741831_1007 (size=1321) 2024-11-10T06:30:54,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741831_1007 (size=1321) 2024-11-10T06:30:54,697 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T06:30:54,697 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf 2024-11-10T06:30:54,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37021 is added to blk_1073741832_1008 (size=32) 2024-11-10T06:30:54,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741832_1008 (size=32) 2024-11-10T06:30:54,705 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:30:54,706 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T06:30:54,708 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T06:30:54,708 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:54,709 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:54,709 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T06:30:54,710 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T06:30:54,710 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:54,711 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:54,711 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T06:30:54,712 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T06:30:54,712 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:54,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:54,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T06:30:54,715 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T06:30:54,715 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:54,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:54,715 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T06:30:54,716 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740 2024-11-10T06:30:54,717 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740 2024-11-10T06:30:54,718 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T06:30:54,718 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T06:30:54,718 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
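The CompactionConfiguration lines above print the effective values of a handful of site-level knobs. A short sketch of setting those same values explicitly on a Configuration; the key names are the standard hbase.hstore.compaction.* properties and the numbers simply restate what the log shows:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class CompactionTuningSketch {
    public static Configuration tuned() {
      Configuration conf = HBaseConfiguration.create();
      // Values visible in the CompactionConfiguration lines above:
      conf.setInt("hbase.hstore.compaction.min", 3);                         // minFilesToCompact
      conf.setInt("hbase.hstore.compaction.max", 10);                        // maxFilesToCompact
      conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                  // ratio
      conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);          // off-peak ratio
      conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);  // minCompactSize (128 MB)
      return conf;
    }
  }

The FlushLargeStoresPolicy message just above would also not fall back to the per-family heuristic if hbase.hregion.percolumnfamilyflush.size.lower.bound were set as a table-descriptor value (for example via TableDescriptorBuilder.setValue), which is the lookup the DEBUG line reports coming up empty.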
2024-11-10T06:30:54,719 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T06:30:54,721 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(746): ClusterId : df0a8ce8-8736-4461-b62c-e36815b2dd42 2024-11-10T06:30:54,721 DEBUG [RS:0;4999977c7e1b:45891 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T06:30:54,722 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:30:54,722 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=725527, jitterRate=-0.07744482159614563}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T06:30:54,723 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731220254705Initializing all the Stores at 1731220254706 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220254706Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220254706Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220254706Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220254706Cleaning up temporary data from old regions at 1731220254718 (+12 ms)Region opened successfully at 1731220254723 (+5 ms) 2024-11-10T06:30:54,724 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T06:30:54,724 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T06:30:54,724 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T06:30:54,724 DEBUG [RS:0;4999977c7e1b:45891 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T06:30:54,724 DEBUG [RS:0;4999977c7e1b:45891 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T06:30:54,724 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T06:30:54,724 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T06:30:54,724 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T06:30:54,724 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731220254724Disabling compacts and flushes for region at 1731220254724Disabling writes for close at 1731220254724Writing region close event to WAL at 1731220254724Closed at 1731220254724 2024-11-10T06:30:54,726 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:30:54,726 DEBUG [RS:0;4999977c7e1b:45891 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T06:30:54,726 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T06:30:54,726 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T06:30:54,727 DEBUG [RS:0;4999977c7e1b:45891 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9e33cce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4999977c7e1b/172.17.0.2:0 2024-11-10T06:30:54,728 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T06:30:54,730 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T06:30:54,743 DEBUG [RS:0;4999977c7e1b:45891 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;4999977c7e1b:45891 2024-11-10T06:30:54,743 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T06:30:54,743 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T06:30:54,743 DEBUG [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-10T06:30:54,744 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(2659): reportForDuty to master=4999977c7e1b,39613,1731220254444 with port=45891, startcode=1731220254499 2024-11-10T06:30:54,744 DEBUG [RS:0;4999977c7e1b:45891 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T06:30:54,746 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51327, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T06:30:54,746 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39613 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4999977c7e1b,45891,1731220254499 2024-11-10T06:30:54,747 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39613 {}] master.ServerManager(517): Registering regionserver=4999977c7e1b,45891,1731220254499 2024-11-10T06:30:54,748 DEBUG [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf 2024-11-10T06:30:54,748 DEBUG [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40625 2024-11-10T06:30:54,748 DEBUG [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T06:30:54,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T06:30:54,751 DEBUG [RS:0;4999977c7e1b:45891 {}] zookeeper.ZKUtil(111): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4999977c7e1b,45891,1731220254499 2024-11-10T06:30:54,751 WARN [RS:0;4999977c7e1b:45891 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T06:30:54,751 INFO [RS:0;4999977c7e1b:45891 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:30:54,752 DEBUG [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499 2024-11-10T06:30:54,752 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4999977c7e1b,45891,1731220254499] 2024-11-10T06:30:54,755 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T06:30:54,757 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T06:30:54,758 INFO [RS:0;4999977c7e1b:45891 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T06:30:54,758 INFO [RS:0;4999977c7e1b:45891 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
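The MemStoreFlusher line above derives globalMemStoreLimit=880 M and the 836 M low-water mark from heap-relative fractions. A sketch of the two standard properties involved, restating their default fractions (0.4 of the region server heap, and 0.95 of that limit, which matches 836/880):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class MemStoreSizingSketch {
    public static Configuration sized() {
      Configuration conf = HBaseConfiguration.create();
      // globalMemStoreLimit is this fraction of the region server heap (880 M in this run);
      conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
      // the low-water mark is a fraction of that limit (836 M = 0.95 * 880 M).
      conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
      return conf;
    }
  }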
2024-11-10T06:30:54,758 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T06:30:54,759 INFO [RS:0;4999977c7e1b:45891 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T06:30:54,759 INFO [RS:0;4999977c7e1b:45891 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:54,759 DEBUG [RS:0;4999977c7e1b:45891 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:54,759 DEBUG [RS:0;4999977c7e1b:45891 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:54,759 DEBUG [RS:0;4999977c7e1b:45891 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:54,759 DEBUG [RS:0;4999977c7e1b:45891 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:54,759 DEBUG [RS:0;4999977c7e1b:45891 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:54,759 DEBUG [RS:0;4999977c7e1b:45891 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4999977c7e1b:0, corePoolSize=2, maxPoolSize=2 2024-11-10T06:30:54,759 DEBUG [RS:0;4999977c7e1b:45891 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:54,759 DEBUG [RS:0;4999977c7e1b:45891 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:54,760 DEBUG [RS:0;4999977c7e1b:45891 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:54,760 DEBUG [RS:0;4999977c7e1b:45891 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:54,760 DEBUG [RS:0;4999977c7e1b:45891 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:54,760 DEBUG [RS:0;4999977c7e1b:45891 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:54,760 DEBUG [RS:0;4999977c7e1b:45891 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:30:54,760 DEBUG [RS:0;4999977c7e1b:45891 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:30:54,760 INFO [RS:0;4999977c7e1b:45891 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
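The ScheduledChore entries above (CompactionChecker every 1000 ms, CompactionThroughputTuner every 60000 ms, and so on) are periodic maintenance tasks run by HBase's internal ChoreService. Conceptually they behave like fixed-rate tasks on a scheduler; a plain-JDK analogue, for illustration only, not HBase code:

  import java.util.concurrent.Executors;
  import java.util.concurrent.ScheduledExecutorService;
  import java.util.concurrent.TimeUnit;

  public class ChoreAnalogy {
    public static void main(String[] args) throws InterruptedException {
      // Rough JDK equivalent of a chore with period=1000, unit=MILLISECONDS.
      ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
      pool.scheduleAtFixedRate(() -> System.out.println("compaction check tick"),
          0, 1000, TimeUnit.MILLISECONDS);
      TimeUnit.SECONDS.sleep(5);
      pool.shutdown();
    }
  }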
2024-11-10T06:30:54,761 INFO [RS:0;4999977c7e1b:45891 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:54,761 INFO [RS:0;4999977c7e1b:45891 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:54,761 INFO [RS:0;4999977c7e1b:45891 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:54,761 INFO [RS:0;4999977c7e1b:45891 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:54,761 INFO [RS:0;4999977c7e1b:45891 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,45891,1731220254499-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T06:30:54,777 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T06:30:54,777 INFO [RS:0;4999977c7e1b:45891 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,45891,1731220254499-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:54,777 INFO [RS:0;4999977c7e1b:45891 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:54,777 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.Replication(171): 4999977c7e1b,45891,1731220254499 started 2024-11-10T06:30:54,792 INFO [RS:0;4999977c7e1b:45891 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:54,792 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(1482): Serving as 4999977c7e1b,45891,1731220254499, RpcServer on 4999977c7e1b/172.17.0.2:45891, sessionid=0x10190e024420001 2024-11-10T06:30:54,792 DEBUG [RS:0;4999977c7e1b:45891 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T06:30:54,793 DEBUG [RS:0;4999977c7e1b:45891 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4999977c7e1b,45891,1731220254499 2024-11-10T06:30:54,793 DEBUG [RS:0;4999977c7e1b:45891 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,45891,1731220254499' 2024-11-10T06:30:54,793 DEBUG [RS:0;4999977c7e1b:45891 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T06:30:54,793 DEBUG [RS:0;4999977c7e1b:45891 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T06:30:54,794 DEBUG [RS:0;4999977c7e1b:45891 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T06:30:54,794 DEBUG [RS:0;4999977c7e1b:45891 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T06:30:54,794 DEBUG [RS:0;4999977c7e1b:45891 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4999977c7e1b,45891,1731220254499 2024-11-10T06:30:54,794 DEBUG [RS:0;4999977c7e1b:45891 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,45891,1731220254499' 2024-11-10T06:30:54,794 DEBUG [RS:0;4999977c7e1b:45891 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T06:30:54,794 DEBUG 
[RS:0;4999977c7e1b:45891 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T06:30:54,795 DEBUG [RS:0;4999977c7e1b:45891 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T06:30:54,795 INFO [RS:0;4999977c7e1b:45891 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T06:30:54,795 INFO [RS:0;4999977c7e1b:45891 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T06:30:54,880 WARN [4999977c7e1b:39613 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-10T06:30:54,897 INFO [RS:0;4999977c7e1b:45891 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C45891%2C1731220254499, suffix=, logDir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499, archiveDir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/oldWALs, maxLogs=32 2024-11-10T06:30:54,898 INFO [RS:0;4999977c7e1b:45891 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C45891%2C1731220254499.1731220254898 2024-11-10T06:30:54,905 INFO [RS:0;4999977c7e1b:45891 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 2024-11-10T06:30:54,912 DEBUG [RS:0;4999977c7e1b:45891 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41397:41397),(127.0.0.1/127.0.0.1:33573:33573)] 2024-11-10T06:30:55,130 DEBUG [4999977c7e1b:39613 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-10T06:30:55,131 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=4999977c7e1b,45891,1731220254499 2024-11-10T06:30:55,133 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4999977c7e1b,45891,1731220254499, state=OPENING 2024-11-10T06:30:55,134 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T06:30:55,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:55,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:30:55,137 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T06:30:55,137 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:30:55,137 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=4999977c7e1b,45891,1731220254499}] 2024-11-10T06:30:55,137 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:30:55,315 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T06:30:55,317 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35153, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T06:30:55,321 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T06:30:55,322 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:30:55,324 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C45891%2C1731220254499.meta, suffix=.meta, logDir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499, archiveDir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/oldWALs, maxLogs=32 2024-11-10T06:30:55,324 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta 2024-11-10T06:30:55,330 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta 2024-11-10T06:30:55,330 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41397:41397),(127.0.0.1/127.0.0.1:33573:33573)] 2024-11-10T06:30:55,331 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:30:55,331 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T06:30:55,332 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T06:30:55,332 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
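The ZKWatcher and MetaRegionLocationCache lines here revolve around watches on znodes such as /hbase/meta-region-server under the quorum at 127.0.0.1:62359. A bare-bones sketch of setting a watch on that path with the plain ZooKeeper client; HBase itself routes this through ZKWatcher, and the session timeout below is an arbitrary choice:

  import java.util.concurrent.CountDownLatch;
  import org.apache.zookeeper.WatchedEvent;
  import org.apache.zookeeper.Watcher;
  import org.apache.zookeeper.ZooKeeper;

  public class MetaZnodeWatchSketch {
    public static void main(String[] args) throws Exception {
      CountDownLatch connected = new CountDownLatch(1);
      Watcher watcher = (WatchedEvent event) -> {
        // Events such as NodeCreated / NodeDataChanged arrive here,
        // like the ones logged for /hbase/meta-region-server.
        System.out.println(event.getType() + " " + event.getPath());
        if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
          connected.countDown();
        }
      };
      ZooKeeper zk = new ZooKeeper("127.0.0.1:62359", 30000, watcher);
      connected.await();
      // watch=true re-registers the connection's default watcher for this path
      zk.exists("/hbase/meta-region-server", true);
      Thread.sleep(10000);
      zk.close();
    }
  }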
2024-11-10T06:30:55,332 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T06:30:55,332 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:30:55,332 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T06:30:55,332 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T06:30:55,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T06:30:55,334 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T06:30:55,335 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:55,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:55,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T06:30:55,336 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T06:30:55,336 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:55,337 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:55,337 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T06:30:55,338 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T06:30:55,338 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:55,338 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:30:55,338 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T06:30:55,339 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T06:30:55,339 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:55,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-10T06:30:55,340 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T06:30:55,340 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740 2024-11-10T06:30:55,341 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740 2024-11-10T06:30:55,343 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T06:30:55,343 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T06:30:55,343 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T06:30:55,345 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T06:30:55,345 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846787, jitterRate=0.07674641907215118}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T06:30:55,346 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T06:30:55,346 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731220255332Writing region info on filesystem at 1731220255332Initializing all the Stores at 1731220255333 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220255333Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220255333Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220255333Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220255333Cleaning up temporary data from old regions at 1731220255343 (+10 ms)Running coprocessor post-open hooks at 1731220255346 (+3 ms)Region opened successfully at 1731220255346 2024-11-10T06:30:55,347 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731220255315 2024-11-10T06:30:55,350 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T06:30:55,350 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T06:30:55,351 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=4999977c7e1b,45891,1731220254499 2024-11-10T06:30:55,353 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4999977c7e1b,45891,1731220254499, state=OPEN 2024-11-10T06:30:55,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T06:30:55,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T06:30:55,358 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=4999977c7e1b,45891,1731220254499 2024-11-10T06:30:55,358 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:30:55,358 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:30:55,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T06:30:55,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=4999977c7e1b,45891,1731220254499 in 221 msec 2024-11-10T06:30:55,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T06:30:55,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 636 msec 2024-11-10T06:30:55,366 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:30:55,366 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T06:30:55,367 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T06:30:55,367 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4999977c7e1b,45891,1731220254499, seqNum=-1] 2024-11-10T06:30:55,368 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T06:30:55,369 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48879, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T06:30:55,375 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 696 msec 2024-11-10T06:30:55,375 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731220255375, completionTime=-1 2024-11-10T06:30:55,375 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-10T06:30:55,375 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-10T06:30:55,378 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-10T06:30:55,378 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731220315378 2024-11-10T06:30:55,378 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731220375378 2024-11-10T06:30:55,378 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-10T06:30:55,378 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,39613,1731220254444-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:55,378 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,39613,1731220254444-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:55,378 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,39613,1731220254444-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:55,379 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-4999977c7e1b:39613, period=300000, unit=MILLISECONDS is enabled. 
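The InitMetaProcedure entries above record the master creating the 'default' and 'hbase' namespaces as part of bootstrapping hbase:meta. For reference, the same namespace operations are exposed through the public Admin API; the sketch below is illustrative only (the 'demo_ns' name is invented, and it assumes a reachable cluster configured via hbase-site.xml), not code from this test.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Lists the namespaces the master created at startup ('default' and 'hbase').
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());
      }
      // Creates an additional namespace; 'demo_ns' is a hypothetical name.
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
    }
  }
}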
2024-11-10T06:30:55,379 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:55,379 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:55,380 DEBUG [master/4999977c7e1b:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T06:30:55,383 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.842sec 2024-11-10T06:30:55,383 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T06:30:55,383 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T06:30:55,383 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T06:30:55,383 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-10T06:30:55,383 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T06:30:55,383 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,39613,1731220254444-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T06:30:55,383 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,39613,1731220254444-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T06:30:55,386 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T06:30:55,386 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T06:30:55,386 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,39613,1731220254444-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
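The chore entries above show the master's ChoreService enabling its periodic background tasks (cluster status, balancer, region normalizer, catalog janitor, hbck, mob cleanup, and so on), each with a name, period, and time unit. ScheduledChore and ChoreService are internal HBase classes rather than public client API, so the sketch below only illustrates the pattern those entries reflect; the chore name and period are invented.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ExampleChore extends ScheduledChore {
  ExampleChore(Stoppable stopper) {
    // name, stopper, period in milliseconds (matches the "period=..., unit=MILLISECONDS" form above)
    super("ExampleChore", stopper, 60_000);
  }

  @Override
  protected void chore() {
    // Periodic work goes here; the real chores above check balancer state, catalog
    // cleanliness, mob files, and similar housekeeping.
    System.out.println("chore fired");
  }

  public static void main(String[] args) throws Exception {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("example");
    service.scheduleChore(new ExampleChore(stopper));
    Thread.sleep(5_000);
    service.shutdown();
  }
}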
2024-11-10T06:30:55,421 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69bb1e39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:30:55,421 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 4999977c7e1b,39613,-1 for getting cluster id 2024-11-10T06:30:55,421 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T06:30:55,424 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'df0a8ce8-8736-4461-b62c-e36815b2dd42' 2024-11-10T06:30:55,424 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T06:30:55,424 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "df0a8ce8-8736-4461-b62c-e36815b2dd42" 2024-11-10T06:30:55,425 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37b8da59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:30:55,425 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4999977c7e1b,39613,-1] 2024-11-10T06:30:55,425 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T06:30:55,425 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:30:55,427 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40540, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T06:30:55,428 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43ab55cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:30:55,428 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T06:30:55,429 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4999977c7e1b,45891,1731220254499, seqNum=-1] 2024-11-10T06:30:55,429 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T06:30:55,431 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58384, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T06:30:55,432 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=4999977c7e1b,39613,1731220254444 2024-11-10T06:30:55,433 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:55,436 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-10T06:30:55,452 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4999977c7e1b:0 server-side Connection retries=45 2024-11-10T06:30:55,452 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:30:55,452 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T06:30:55,452 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T06:30:55,453 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:30:55,453 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T06:30:55,453 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T06:30:55,453 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T06:30:55,453 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42305 2024-11-10T06:30:55,455 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42305 connecting to ZooKeeper ensemble=127.0.0.1:62359 2024-11-10T06:30:55,455 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:55,457 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:30:55,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:423050x0, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T06:30:55,462 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42305-0x10190e024420002, quorum=127.0.0.1:62359, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-10T06:30:55,462 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42305-0x10190e024420002 connected 2024-11-10T06:30:55,462 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-10T06:30:55,463 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T06:30:55,463 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
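Earlier in this block the test client opens its connection: it fetches the cluster id and the hbase:meta location through the connection registry, after which HBaseTestingUtil reports the minicluster as up; the remaining entries begin starting a second region server. What that client-side handshake corresponds to in application code is roughly the following sketch; the quorum value is a placeholder and getClusterId() assumes a reasonably recent client.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder quorum; a real client would point this at the cluster's ZooKeeper.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Opening the connection resolves the cluster id and meta location via the registry.
      System.out.println("cluster id: " + conn.getClusterId());
      for (TableName tn : admin.listTableNames()) {
        System.out.println("table: " + tn);
      }
    }
  }
}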
2024-11-10T06:30:55,464 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42305-0x10190e024420002, quorum=127.0.0.1:62359, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T06:30:55,465 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42305-0x10190e024420002, quorum=127.0.0.1:62359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T06:30:55,466 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42305 2024-11-10T06:30:55,466 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42305 2024-11-10T06:30:55,468 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42305 2024-11-10T06:30:55,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42305 2024-11-10T06:30:55,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42305 2024-11-10T06:30:55,471 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.HRegionServer(746): ClusterId : df0a8ce8-8736-4461-b62c-e36815b2dd42 2024-11-10T06:30:55,471 DEBUG [RS:1;4999977c7e1b:42305 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T06:30:55,474 DEBUG [RS:1;4999977c7e1b:42305 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T06:30:55,474 DEBUG [RS:1;4999977c7e1b:42305 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T06:30:55,476 DEBUG [RS:1;4999977c7e1b:42305 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T06:30:55,477 DEBUG [RS:1;4999977c7e1b:42305 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16216cf1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4999977c7e1b/172.17.0.2:0 2024-11-10T06:30:55,484 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:30:55,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:30:55,494 DEBUG [RS:1;4999977c7e1b:42305 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;4999977c7e1b:42305 2024-11-10T06:30:55,494 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T06:30:55,494 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T06:30:55,494 DEBUG [RS:1;4999977c7e1b:42305 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-10T06:30:55,495 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.HRegionServer(2659): reportForDuty to master=4999977c7e1b,39613,1731220254444 with port=42305, startcode=1731220255452 2024-11-10T06:30:55,495 DEBUG [RS:1;4999977c7e1b:42305 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T06:30:55,497 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35721, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T06:30:55,497 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39613 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4999977c7e1b,42305,1731220255452 2024-11-10T06:30:55,497 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39613 {}] master.ServerManager(517): Registering regionserver=4999977c7e1b,42305,1731220255452 2024-11-10T06:30:55,499 DEBUG [RS:1;4999977c7e1b:42305 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf 2024-11-10T06:30:55,499 DEBUG [RS:1;4999977c7e1b:42305 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40625 2024-11-10T06:30:55,499 DEBUG [RS:1;4999977c7e1b:42305 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T06:30:55,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T06:30:55,502 DEBUG [RS:1;4999977c7e1b:42305 {}] zookeeper.ZKUtil(111): regionserver:42305-0x10190e024420002, quorum=127.0.0.1:62359, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4999977c7e1b,42305,1731220255452 2024-11-10T06:30:55,502 WARN [RS:1;4999977c7e1b:42305 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T06:30:55,502 INFO [RS:1;4999977c7e1b:42305 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:30:55,502 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4999977c7e1b,42305,1731220255452] 2024-11-10T06:30:55,502 DEBUG [RS:1;4999977c7e1b:42305 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452 2024-11-10T06:30:55,507 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T06:30:55,509 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T06:30:55,509 INFO [RS:1;4999977c7e1b:42305 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T06:30:55,509 INFO [RS:1;4999977c7e1b:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-10T06:30:55,509 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T06:30:55,510 INFO [RS:1;4999977c7e1b:42305 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T06:30:55,510 INFO [RS:1;4999977c7e1b:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:55,511 DEBUG [RS:1;4999977c7e1b:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:55,511 DEBUG [RS:1;4999977c7e1b:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:55,511 DEBUG [RS:1;4999977c7e1b:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:55,511 DEBUG [RS:1;4999977c7e1b:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:55,511 DEBUG [RS:1;4999977c7e1b:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:55,511 DEBUG [RS:1;4999977c7e1b:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4999977c7e1b:0, corePoolSize=2, maxPoolSize=2 2024-11-10T06:30:55,511 DEBUG [RS:1;4999977c7e1b:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:55,511 DEBUG [RS:1;4999977c7e1b:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:55,511 DEBUG [RS:1;4999977c7e1b:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:55,511 DEBUG [RS:1;4999977c7e1b:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:55,511 DEBUG [RS:1;4999977c7e1b:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:55,511 DEBUG [RS:1;4999977c7e1b:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:30:55,511 DEBUG [RS:1;4999977c7e1b:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:30:55,512 DEBUG [RS:1;4999977c7e1b:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:30:55,512 INFO [RS:1;4999977c7e1b:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
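The RS:1 entries in this and the surrounding blocks trace the second region server that the in-process test cluster is bringing up: RPC executors, ZooKeeper session, block cache, chores, and executor pools. Driving such a cluster from test code typically looks like the hedged sketch below; it assumes the HBaseTestingUtil/HBaseTestingUtility helper keeps its long-standing startMiniCluster(), getConfiguration(), and shutdownMiniCluster() methods, whose exact overloads vary between HBase versions.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Brings up an in-process HDFS + ZooKeeper + HBase cluster, as in the log above.
    // Overloads/options for extra region servers exist but differ across versions.
    util.startMiniCluster();
    try (Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
         Admin admin = conn.getAdmin()) {
      System.out.println("tables at startup: " + admin.listTableNames().length);
    } finally {
      util.shutdownMiniCluster();
    }
  }
}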
2024-11-10T06:30:55,512 INFO [RS:1;4999977c7e1b:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:55,512 INFO [RS:1;4999977c7e1b:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:55,512 INFO [RS:1;4999977c7e1b:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:55,512 INFO [RS:1;4999977c7e1b:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:55,512 INFO [RS:1;4999977c7e1b:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,42305,1731220255452-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T06:30:55,528 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T06:30:55,528 INFO [RS:1;4999977c7e1b:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,42305,1731220255452-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:55,528 INFO [RS:1;4999977c7e1b:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:55,528 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.Replication(171): 4999977c7e1b,42305,1731220255452 started 2024-11-10T06:30:55,542 INFO [RS:1;4999977c7e1b:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:30:55,542 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.HRegionServer(1482): Serving as 4999977c7e1b,42305,1731220255452, RpcServer on 4999977c7e1b/172.17.0.2:42305, sessionid=0x10190e024420002 2024-11-10T06:30:55,542 DEBUG [RS:1;4999977c7e1b:42305 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T06:30:55,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;4999977c7e1b:42305,5,FailOnTimeoutGroup] 2024-11-10T06:30:55,542 DEBUG [RS:1;4999977c7e1b:42305 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4999977c7e1b,42305,1731220255452 2024-11-10T06:30:55,542 DEBUG [RS:1;4999977c7e1b:42305 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,42305,1731220255452' 2024-11-10T06:30:55,543 DEBUG [RS:1;4999977c7e1b:42305 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T06:30:55,543 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-10T06:30:55,543 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T06:30:55,543 DEBUG [RS:1;4999977c7e1b:42305 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T06:30:55,544 DEBUG [RS:1;4999977c7e1b:42305 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T06:30:55,544 DEBUG [RS:1;4999977c7e1b:42305 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T06:30:55,544 DEBUG [RS:1;4999977c7e1b:42305 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
4999977c7e1b,42305,1731220255452 2024-11-10T06:30:55,544 DEBUG [RS:1;4999977c7e1b:42305 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,42305,1731220255452' 2024-11-10T06:30:55,544 DEBUG [RS:1;4999977c7e1b:42305 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T06:30:55,544 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 4999977c7e1b,39613,1731220254444 2024-11-10T06:30:55,544 DEBUG [RS:1;4999977c7e1b:42305 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T06:30:55,544 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3fa851c8 2024-11-10T06:30:55,545 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T06:30:55,545 DEBUG [RS:1;4999977c7e1b:42305 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T06:30:55,545 INFO [RS:1;4999977c7e1b:42305 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T06:30:55,545 INFO [RS:1;4999977c7e1b:42305 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T06:30:55,547 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40542, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T06:30:55,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39613 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-10T06:30:55,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39613 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
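The two TableDescriptorChecker warnings above fire because the test deliberately creates its table with a tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) so that flushes and log rolls happen quickly. Creating a table with those descriptor-level overrides through the public client API looks roughly like this sketch; the table and family names and the two sizes follow the log, everything else is illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
          .setMaxFileSize(786432L)        // tiny split threshold, triggering the first warning above
          .setMemStoreFlushSize(8192L)    // tiny flush size, triggering the second warning above
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .build();
      // Blocks until the CreateTableProcedure recorded below has finished.
      admin.createTable(td);
    }
  }
}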
2024-11-10T06:30:55,548 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39613 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T06:30:55,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39613 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-10T06:30:55,551 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T06:30:55,551 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:55,551 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39613 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-10T06:30:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39613 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T06:30:55,552 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-10T06:30:55,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741835_1011 (size=393) 2024-11-10T06:30:55,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37021 is added to blk_1073741835_1011 (size=393) 2024-11-10T06:30:55,562 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 87051d5bfe1f8458f4aca6a150270b03, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf 2024-11-10T06:30:55,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38361 is added to blk_1073741836_1012 (size=76) 2024-11-10T06:30:55,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37021 is added to blk_1073741836_1012 (size=76) 2024-11-10T06:30:55,569 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:30:55,569 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 87051d5bfe1f8458f4aca6a150270b03, disabling compactions & flushes 2024-11-10T06:30:55,569 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. 2024-11-10T06:30:55,569 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. 2024-11-10T06:30:55,569 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. after waiting 0 ms 2024-11-10T06:30:55,569 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. 2024-11-10T06:30:55,569 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. 2024-11-10T06:30:55,569 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 87051d5bfe1f8458f4aca6a150270b03: Waiting for close lock at 1731220255569Disabling compacts and flushes for region at 1731220255569Disabling writes for close at 1731220255569Writing region close event to WAL at 1731220255569Closed at 1731220255569 2024-11-10T06:30:55,571 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T06:30:55,571 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731220255571"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731220255571"}]},"ts":"1731220255571"} 2024-11-10T06:30:55,574 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-10T06:30:55,576 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T06:30:55,576 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731220255576"}]},"ts":"1731220255576"} 2024-11-10T06:30:55,578 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-10T06:30:55,579 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=87051d5bfe1f8458f4aca6a150270b03, ASSIGN}] 2024-11-10T06:30:55,580 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=87051d5bfe1f8458f4aca6a150270b03, ASSIGN 2024-11-10T06:30:55,581 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=87051d5bfe1f8458f4aca6a150270b03, ASSIGN; state=OFFLINE, location=4999977c7e1b,45891,1731220254499; forceNewPlan=false, retain=false 2024-11-10T06:30:55,647 INFO [RS:1;4999977c7e1b:42305 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C42305%2C1731220255452, suffix=, logDir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452, archiveDir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/oldWALs, maxLogs=32 2024-11-10T06:30:55,648 INFO [RS:1;4999977c7e1b:42305 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C42305%2C1731220255452.1731220255648 2024-11-10T06:30:55,655 INFO [RS:1;4999977c7e1b:42305 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 2024-11-10T06:30:55,656 DEBUG [RS:1;4999977c7e1b:42305 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41397:41397),(127.0.0.1/127.0.0.1:33573:33573)] 2024-11-10T06:30:55,732 INFO [4999977c7e1b:39613 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
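The AbstractFSWAL entry above reports the WAL settings this region server runs with (blocksize=256 MB, rollsize=128 MB, maxLogs=32). In a log-rolling test these are normally driven by a handful of configuration keys; the sketch below sets them on a client/test Configuration, with values chosen to reproduce the blocksize/rollsize relationship reported above. The property names (hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs, hbase.regionserver.logroll.period) are the standard ones as best I recall; verify them against your HBase version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at blocksize * multiplier
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // WAL count before forced flushes
    conf.setLong("hbase.regionserver.logroll.period", 3_600_000L);         // time-based roll, in ms
    long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
        * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
    System.out.println("roll size ~ " + rollSize + " bytes");              // ~128 MB with the values above
  }
}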
2024-11-10T06:30:55,733 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=87051d5bfe1f8458f4aca6a150270b03, regionState=OPENING, regionLocation=4999977c7e1b,45891,1731220254499 2024-11-10T06:30:55,736 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=87051d5bfe1f8458f4aca6a150270b03, ASSIGN because future has completed 2024-11-10T06:30:55,736 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 87051d5bfe1f8458f4aca6a150270b03, server=4999977c7e1b,45891,1731220254499}] 2024-11-10T06:30:55,894 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. 2024-11-10T06:30:55,894 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 87051d5bfe1f8458f4aca6a150270b03, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:30:55,895 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:30:55,895 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:30:55,895 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:30:55,895 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:30:55,896 INFO [StoreOpener-87051d5bfe1f8458f4aca6a150270b03-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:30:55,898 INFO [StoreOpener-87051d5bfe1f8458f4aca6a150270b03-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 87051d5bfe1f8458f4aca6a150270b03 columnFamilyName info 2024-11-10T06:30:55,898 DEBUG [StoreOpener-87051d5bfe1f8458f4aca6a150270b03-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:30:55,898 INFO [StoreOpener-87051d5bfe1f8458f4aca6a150270b03-1 {}] regionserver.HStore(327): Store=87051d5bfe1f8458f4aca6a150270b03/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:30:55,898 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:30:55,899 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:30:55,899 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:30:55,900 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:30:55,900 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:30:55,902 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:30:55,904 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:30:55,904 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 87051d5bfe1f8458f4aca6a150270b03; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773087, jitterRate=-0.016969725489616394}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T06:30:55,905 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:30:55,905 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 87051d5bfe1f8458f4aca6a150270b03: Running coprocessor pre-open hook at 1731220255895Writing region info on filesystem at 1731220255895Initializing all the Stores at 1731220255896 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220255896Cleaning up temporary data from old regions at 1731220255900 (+4 ms)Running coprocessor post-open hooks at 1731220255905 (+5 ms)Region opened successfully at 1731220255905 2024-11-10T06:30:55,906 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03., pid=6, masterSystemTime=1731220255889 2024-11-10T06:30:55,909 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. 2024-11-10T06:30:55,909 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. 2024-11-10T06:30:55,910 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=87051d5bfe1f8458f4aca6a150270b03, regionState=OPEN, openSeqNum=2, regionLocation=4999977c7e1b,45891,1731220254499 2024-11-10T06:30:55,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 87051d5bfe1f8458f4aca6a150270b03, server=4999977c7e1b,45891,1731220254499 because future has completed 2024-11-10T06:30:55,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T06:30:55,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 87051d5bfe1f8458f4aca6a150270b03, server=4999977c7e1b,45891,1731220254499 in 178 msec 2024-11-10T06:30:55,920 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T06:30:55,920 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=87051d5bfe1f8458f4aca6a150270b03, ASSIGN in 338 msec 2024-11-10T06:30:55,921 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T06:30:55,921 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731220255921"}]},"ts":"1731220255921"} 2024-11-10T06:30:55,923 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-10T06:30:55,924 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T06:30:55,926 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 376 msec 2024-11-10T06:30:56,004 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T06:30:56,008 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:30:56,024 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:30:56,026 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:30:56,026 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:30:56,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-10T06:30:56,034 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-10T06:30:56,035 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-10T06:31:00,755 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-10T06:31:01,397 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T06:31:01,399 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:01,416 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:01,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:01,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:05,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39613 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T06:31:05,616 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-10T06:31:05,616 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-10T06:31:05,619 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-10T06:31:05,620 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. 2024-11-10T06:31:05,633 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:31:05,637 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:31:05,637 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:31:05,638 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:31:05,638 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T06:31:05,638 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48400c5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:31:05,639 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2378f0ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:31:05,756 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ab7c419{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/java.io.tmpdir/jetty-localhost-44657-hadoop-hdfs-3_4_1-tests_jar-_-any-1517151583775843793/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:05,756 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35f8237e{HTTP/1.1, (http/1.1)}{localhost:44657} 2024-11-10T06:31:05,757 INFO [Time-limited test {}] server.Server(415): Started @115457ms 2024-11-10T06:31:05,758 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
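Just before the Jetty/datanode startup that begins above, the test confirms that TestLogRolling-testLogRollOnDatanodeDeath has exactly one region and records its name. Locating a table's regions and writing into it from client code looks roughly like the sketch below; the table and family names come from the log, while the row key, qualifier, and value are made up.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocationSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(tn);
         Table table = conn.getTable(tn)) {
      // Client-side equivalent of the "Found 1 regions for table" check above.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println("region " + loc.getRegion().getRegionNameAsString()
            + " on " + loc.getServerName());
      }
      // A write into the 'info' family; row, qualifier, and value are hypothetical.
      table.put(new Put(Bytes.toBytes("row-0"))
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    }
  }
}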
2024-11-10T06:31:05,792 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:31:05,795 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:31:05,796 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:31:05,796 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:31:05,796 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T06:31:05,796 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a3ddcf2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:31:05,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6ee7700a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:31:05,868 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data5/current/BP-741755570-172.17.0.2-1731220253746/current, will proceed with Du for space computation calculation, 2024-11-10T06:31:05,868 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data6/current/BP-741755570-172.17.0.2-1731220253746/current, will proceed with Du for space computation calculation, 2024-11-10T06:31:05,885 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T06:31:05,887 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcddf0035ea0b574e with lease ID 0x9474d1e025b216c1: Processing first storage report for DS-6983d177-432e-4217-85a8-e2c7a50145fb from datanode DatanodeRegistration(127.0.0.1:39575, datanodeUuid=8366e522-dd86-4d42-af39-f2b45cfe085a, infoPort=34193, infoSecurePort=0, ipcPort=35527, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746) 2024-11-10T06:31:05,887 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcddf0035ea0b574e with lease ID 0x9474d1e025b216c1: from storage DS-6983d177-432e-4217-85a8-e2c7a50145fb node DatanodeRegistration(127.0.0.1:39575, datanodeUuid=8366e522-dd86-4d42-af39-f2b45cfe085a, infoPort=34193, infoSecurePort=0, ipcPort=35527, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:05,887 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcddf0035ea0b574e with lease ID 0x9474d1e025b216c1: Processing first storage report for DS-3a9c30a1-44b7-43a8-8e97-bf93d6bdc898 from datanode DatanodeRegistration(127.0.0.1:39575, datanodeUuid=8366e522-dd86-4d42-af39-f2b45cfe085a, infoPort=34193, infoSecurePort=0, ipcPort=35527, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746) 2024-11-10T06:31:05,887 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcddf0035ea0b574e with lease ID 0x9474d1e025b216c1: from storage DS-3a9c30a1-44b7-43a8-8e97-bf93d6bdc898 node DatanodeRegistration(127.0.0.1:39575, datanodeUuid=8366e522-dd86-4d42-af39-f2b45cfe085a, infoPort=34193, infoSecurePort=0, ipcPort=35527, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:05,924 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@13a31f2a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/java.io.tmpdir/jetty-localhost-40403-hadoop-hdfs-3_4_1-tests_jar-_-any-1857738928016057351/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:05,925 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37073a3f{HTTP/1.1, (http/1.1)}{localhost:40403} 2024-11-10T06:31:05,925 INFO [Time-limited test {}] server.Server(415): Started @115626ms 2024-11-10T06:31:05,926 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T06:31:05,963 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:31:05,966 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:31:05,967 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:31:05,967 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:31:05,967 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T06:31:05,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d0c94cc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:31:05,968 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4cb48605{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:31:06,022 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data8/current/BP-741755570-172.17.0.2-1731220253746/current, will proceed with Du for space computation calculation, 2024-11-10T06:31:06,022 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data7/current/BP-741755570-172.17.0.2-1731220253746/current, will proceed with Du for space computation calculation, 2024-11-10T06:31:06,040 WARN [Thread-844 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T06:31:06,043 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9f17a455d0074df8 with lease ID 0x9474d1e025b216c2: Processing first storage report for DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d from datanode DatanodeRegistration(127.0.0.1:39305, datanodeUuid=b468639d-7b11-45e5-9a2a-01e054eead7f, infoPort=41513, infoSecurePort=0, ipcPort=33607, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746) 2024-11-10T06:31:06,043 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9f17a455d0074df8 with lease ID 0x9474d1e025b216c2: from storage DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d node DatanodeRegistration(127.0.0.1:39305, datanodeUuid=b468639d-7b11-45e5-9a2a-01e054eead7f, infoPort=41513, infoSecurePort=0, ipcPort=33607, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T06:31:06,043 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9f17a455d0074df8 with lease ID 0x9474d1e025b216c2: Processing first storage report for DS-9f86d6b9-7965-4eea-bf7b-5ec11a7b8ebd from datanode DatanodeRegistration(127.0.0.1:39305, datanodeUuid=b468639d-7b11-45e5-9a2a-01e054eead7f, infoPort=41513, infoSecurePort=0, ipcPort=33607, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746) 2024-11-10T06:31:06,043 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9f17a455d0074df8 with lease ID 0x9474d1e025b216c2: from storage DS-9f86d6b9-7965-4eea-bf7b-5ec11a7b8ebd node DatanodeRegistration(127.0.0.1:39305, datanodeUuid=b468639d-7b11-45e5-9a2a-01e054eead7f, infoPort=41513, infoSecurePort=0, ipcPort=33607, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:06,085 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77de3d88{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/java.io.tmpdir/jetty-localhost-40749-hadoop-hdfs-3_4_1-tests_jar-_-any-12527260401213086378/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:06,086 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@55e6039a{HTTP/1.1, (http/1.1)}{localhost:40749} 2024-11-10T06:31:06,086 INFO [Time-limited test {}] server.Server(415): Started @115787ms 2024-11-10T06:31:06,087 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
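[Editor's note] The "Processing first storage report" block reports above come from additional datanodes that the test brings up in its embedded HDFS cluster before killing others. Outside of HBaseTestingUtil, the same setup can be sketched directly with MiniDFSCluster from the hadoop-hdfs test jar; the datanode counts here are illustrative, not the test's actual values.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Start an in-process HDFS with three datanodes, as the test harness does under the hood.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      // Add two more datanodes; each new node registers with the namenode and sends
      // the first block report seen as "Processing first storage report ..." above.
      cluster.startDataNodes(conf, 2, true, null, null);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/user/jenkins/test-data"));
      System.out.println("live datanodes: " + cluster.getDataNodes().size());
    } finally {
      cluster.shutdown();
    }
  }
}
```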
2024-11-10T06:31:06,182 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data9/current/BP-741755570-172.17.0.2-1731220253746/current, will proceed with Du for space computation calculation, 2024-11-10T06:31:06,183 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data10/current/BP-741755570-172.17.0.2-1731220253746/current, will proceed with Du for space computation calculation, 2024-11-10T06:31:06,200 WARN [Thread-879 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T06:31:06,203 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x555ad60d4133503b with lease ID 0x9474d1e025b216c3: Processing first storage report for DS-9d42829a-de72-477c-9039-78aa7a78ff04 from datanode DatanodeRegistration(127.0.0.1:38159, datanodeUuid=d0b45e4f-20df-4795-97b7-9a296c8b0c00, infoPort=42341, infoSecurePort=0, ipcPort=43655, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746) 2024-11-10T06:31:06,203 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x555ad60d4133503b with lease ID 0x9474d1e025b216c3: from storage DS-9d42829a-de72-477c-9039-78aa7a78ff04 node DatanodeRegistration(127.0.0.1:38159, datanodeUuid=d0b45e4f-20df-4795-97b7-9a296c8b0c00, infoPort=42341, infoSecurePort=0, ipcPort=43655, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T06:31:06,203 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x555ad60d4133503b with lease ID 0x9474d1e025b216c3: Processing first storage report for DS-852ed366-342d-45f3-b4b5-495bd2dbf2cd from datanode DatanodeRegistration(127.0.0.1:38159, datanodeUuid=d0b45e4f-20df-4795-97b7-9a296c8b0c00, infoPort=42341, infoSecurePort=0, ipcPort=43655, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746) 2024-11-10T06:31:06,203 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x555ad60d4133503b with lease ID 0x9474d1e025b216c3: from storage DS-852ed366-342d-45f3-b4b5-495bd2dbf2cd node DatanodeRegistration(127.0.0.1:38159, datanodeUuid=d0b45e4f-20df-4795-97b7-9a296c8b0c00, infoPort=42341, infoSecurePort=0, ipcPort=43655, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:06,209 WARN [ResponseProcessor for block BP-741755570-172.17.0.2-1731220253746:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-741755570-172.17.0.2-1731220253746:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:06,209 WARN [ResponseProcessor for block BP-741755570-172.17.0.2-1731220253746:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-741755570-172.17.0.2-1731220253746:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:06,209 WARN [ResponseProcessor for block BP-741755570-172.17.0.2-1731220253746:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-741755570-172.17.0.2-1731220253746:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:06,209 WARN [ResponseProcessor for block BP-741755570-172.17.0.2-1731220253746:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-741755570-172.17.0.2-1731220253746:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:06,209 WARN [DataStreamer for file /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 block BP-741755570-172.17.0.2-1731220253746:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK], DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]) is bad. 2024-11-10T06:31:06,209 WARN [DataStreamer for file /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/WALs/4999977c7e1b,39613,1731220254444/4999977c7e1b%2C39613%2C1731220254444.1731220254601 block BP-741755570-172.17.0.2-1731220253746:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK], DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]) is bad. 
2024-11-10T06:31:06,209 WARN [DataStreamer for file /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 block BP-741755570-172.17.0.2-1731220253746:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK], DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]) is bad. 2024-11-10T06:31:06,210 WARN [DataStreamer for file /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta block BP-741755570-172.17.0.2-1731220253746:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK], DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]) is bad. 2024-11-10T06:31:06,211 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1972470542_22 at /127.0.0.1:60856 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:38361:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60856 dst: /127.0.0.1:38361 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:06,211 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1972470542_22 at /127.0.0.1:45550 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:37021:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45550 dst: /127.0.0.1:37021 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:06,211 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:60814 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38361:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60814 dst: /127.0.0.1:38361 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:06,211 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:60820 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38361:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60820 dst: /127.0.0.1:38361 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:06,211 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1214526257_22 at /127.0.0.1:45468 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37021:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45468 dst: /127.0.0.1:37021 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:06,212 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3893f69{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:06,211 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:45506 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37021:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45506 dst: /127.0.0.1:37021 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:06,211 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:45520 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37021:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45520 dst: /127.0.0.1:37021 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:06,211 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1214526257_22 at /127.0.0.1:60782 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38361:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60782 dst: /127.0.0.1:38361 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:06,213 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@206fa4ee{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:31:06,213 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:31:06,213 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c9fa828{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:31:06,213 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30985369{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir/,STOPPED} 2024-11-10T06:31:06,217 WARN [BP-741755570-172.17.0.2-1731220253746 heartbeating to localhost/127.0.0.1:40625 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:31:06,217 WARN [BP-741755570-172.17.0.2-1731220253746 heartbeating to localhost/127.0.0.1:40625 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-741755570-172.17.0.2-1731220253746 (Datanode Uuid e19f1e86-46e6-4f0d-b75d-d7b0b9a2c8c8) service to localhost/127.0.0.1:40625 2024-11-10T06:31:06,217 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
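[Editor's note] The block-pool and command-processor shutdown messages starting just above (and continuing in the records below) are a datanode going away, which is the event this test is named for. In a MiniDFSCluster-based test that is typically simulated with stopDataNode(); a minimal, hypothetical sketch, not the test's real code:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class KillDatanodeSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).numDataNodes(3).build();
    try {
      cluster.waitActive();
      // Stopping a datanode ends its block-pool service and interrupts its command
      // processor, producing WARN/ERROR lines much like the ones recorded here.
      MiniDFSCluster.DataNodeProperties stopped = cluster.stopDataNode(0);
      // ... exercise the code under test while the write pipeline is degraded ...
      // The stopped node can be brought back later if the scenario needs it.
      cluster.restartDataNode(stopped, true);
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }
  }
}
```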
2024-11-10T06:31:06,217 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:31:06,217 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data3/current/BP-741755570-172.17.0.2-1731220253746 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:06,217 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data4/current/BP-741755570-172.17.0.2-1731220253746 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:06,218 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:31:06,218 WARN [DataStreamer for file /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/WALs/4999977c7e1b,39613,1731220254444/4999977c7e1b%2C39613%2C1731220254444.1731220254601 block BP-741755570-172.17.0.2-1731220253746:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:06,218 WARN [DataStreamer for file /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 block BP-741755570-172.17.0.2-1731220253746:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:06,218 WARN [DataStreamer for file /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 block BP-741755570-172.17.0.2-1731220253746:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T06:31:06,220 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@6fdb820b {}] datanode.DataXceiver(331): 127.0.0.1:38361:DataXceiver error processing unknown operation src: /127.0.0.1:39166 dst: /127.0.0.1:38361 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:06,220 WARN [DataStreamer for file /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta block BP-741755570-172.17.0.2-1731220253746:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
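[Editor's note] The repeated "datanode 0(...) is bad" recoveries and the "Connection reset" failures in createBlockOutputStream above are the DFS client's normal pipeline-recovery path once the datanodes backing those blocks have been shut down. How aggressively the client replaces a lost pipeline member is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings; the sketch below only shows those keys being set on a client Configuration with illustrative values, not the configuration this test actually uses.

```java
import org.apache.hadoop.conf.Configuration;

public class PipelineRecoverySettings {
  public static Configuration clientConf() {
    Configuration conf = new Configuration();
    // Allow the client to ask the namenode for a replacement datanode when a
    // member of the write pipeline fails (the situation logged above).
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT only replaces nodes for larger pipelines; ALWAYS and NEVER are the alternatives.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Keep writing with the surviving nodes if no replacement can be found,
    // rather than failing the stream outright.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}
```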
2024-11-10T06:31:06,229 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24d45158{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:06,230 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@76afaf39{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:31:06,230 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:31:06,230 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6cb8e75e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:31:06,230 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@778fdefb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir/,STOPPED} 2024-11-10T06:31:06,231 WARN [BP-741755570-172.17.0.2-1731220253746 heartbeating to localhost/127.0.0.1:40625 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:31:06,231 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-10T06:31:06,231 WARN [BP-741755570-172.17.0.2-1731220253746 heartbeating to localhost/127.0.0.1:40625 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-741755570-172.17.0.2-1731220253746 (Datanode Uuid c03a6f8f-b159-46ce-9bff-c535fb87cf89) service to localhost/127.0.0.1:40625 2024-11-10T06:31:06,231 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:31:06,232 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data1/current/BP-741755570-172.17.0.2-1731220253746 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:06,232 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data2/current/BP-741755570-172.17.0.2-1731220253746 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:06,232 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:31:06,236 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03., hostname=4999977c7e1b,45891,1731220254499, seqNum=2] 2024-11-10T06:31:06,238 ERROR [FSHLog-0-hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf-prefix:4999977c7e1b,45891,1731220254499 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:06,238 WARN [FSHLog-0-hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf-prefix:4999977c7e1b,45891,1731220254499 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:06,238 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
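[Editor's note] The "All datanodes ... are bad. Aborting..." failure above surfaces when the WAL's underlying HDFS output stream can no longer be synced because every replica in its pipeline is gone. In plain HDFS client terms the pattern looks like the sketch below: a write plus hflush() throws, and the caller's realistic option is to abandon that file and open a new one on a healthy pipeline, which is what the log roll in the records that follow does (the old stream is simply given up on, matching the "close old writer failed" warning). Paths here are hypothetical.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HflushFailureSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FSDataOutputStream out = fs.create(new Path("/tmp/example-wal"), true); // hypothetical file
    try {
      out.write("edit-1\n".getBytes(StandardCharsets.UTF_8));
      // hflush pushes the data through the datanode pipeline; if no usable
      // replica remains it fails with "All datanodes ... are bad".
      out.hflush();
    } catch (IOException pipelineDead) {
      // Analogue of the roll above: abandon this writer and start a fresh file.
      System.err.println("sync failed, rolling to a new file: " + pipelineDead.getMessage());
      out = fs.create(new Path("/tmp/example-wal.roll"), true); // hypothetical new file
    } finally {
      out.close();
    }
  }
}
```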
2024-11-10T06:31:06,238 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 4999977c7e1b%2C45891%2C1731220254499:(num 1731220254898) roll requested 2024-11-10T06:31:06,238 INFO [regionserver/4999977c7e1b:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C45891%2C1731220254499.1731220266238 2024-11-10T06:31:06,244 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:06,244 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:06,244 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:06,245 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:06,245 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:06,245 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220266238 2024-11-10T06:31:06,245 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:06,245 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T06:31:06,246 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-10T06:31:06,247 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-10T06:31:06,247 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 2024-11-10T06:31:06,248 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42341:42341),(127.0.0.1/127.0.0.1:34193:34193)] 2024-11-10T06:31:06,248 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 is not closed yet, will try archiving it next time 2024-11-10T06:31:06,249 WARN [IPC Server handler 3 on default port 40625 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-10T06:31:06,253 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 after 4ms 2024-11-10T06:31:06,580 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:07,513 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
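[Editor's note] RecoverLeaseFSUtils above is HBase's wrapper around HDFS lease recovery on the abandoned WAL file. The underlying primitive is DistributedFileSystem.recoverLease(), which usually has to be retried until the namenode reports the file closed, matching the "Failed to recover lease, attempt=0" and "Lease recovery is in progress" records. A minimal sketch with a caller-supplied path and an illustrative polling interval:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  /** Keeps asking the namenode to recover the lease until the file is closed. */
  static void recover(DistributedFileSystem dfs, Path walFile) throws Exception {
    boolean recovered = dfs.recoverLease(walFile);
    while (!recovered) {
      Thread.sleep(1000); // illustrative back-off between attempts
      // isFileClosed() avoids re-triggering recovery once it has already succeeded.
      recovered = dfs.isFileClosed(walFile) || dfs.recoverLease(walFile);
    }
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    if (args.length > 0 && fs instanceof DistributedFileSystem) {
      recover((DistributedFileSystem) fs, new Path(args[0])); // path supplied by the caller
    }
  }
}
```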
2024-11-10T06:31:08,249 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:08,250 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220266238 2024-11-10T06:31:08,250 WARN [ResponseProcessor for block BP-741755570-172.17.0.2-1731220253746:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-741755570-172.17.0.2-1731220253746:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:08,251 WARN [DataStreamer for file /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220266238 block BP-741755570-172.17.0.2-1731220253746:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK], DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]) is bad. 2024-11-10T06:31:08,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:48588 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:38159:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48588 dst: /127.0.0.1:38159 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:08,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:59144 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:39575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59144 dst: /127.0.0.1:39575 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T06:31:08,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77de3d88{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:08,253 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@55e6039a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:31:08,253 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:31:08,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4cb48605{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:31:08,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d0c94cc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir/,STOPPED} 2024-11-10T06:31:08,255 WARN [BP-741755570-172.17.0.2-1731220253746 heartbeating to localhost/127.0.0.1:40625 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:31:08,255 WARN [BP-741755570-172.17.0.2-1731220253746 heartbeating to localhost/127.0.0.1:40625 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-741755570-172.17.0.2-1731220253746 (Datanode Uuid d0b45e4f-20df-4795-97b7-9a296c8b0c00) service to localhost/127.0.0.1:40625 2024-11-10T06:31:08,255 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-10T06:31:08,255 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:31:08,255 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data9/current/BP-741755570-172.17.0.2-1731220253746 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:08,255 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data10/current/BP-741755570-172.17.0.2-1731220253746 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:08,256 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:31:08,580 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:09,513 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:10,249 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:10,250 WARN [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]] 2024-11-10T06:31:10,250 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 4999977c7e1b%2C45891%2C1731220254499:(num 1731220266238) roll requested 2024-11-10T06:31:10,250 INFO [regionserver/4999977c7e1b:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C45891%2C1731220254499.1731220270250 2024-11-10T06:31:10,254 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 after 4007ms 2024-11-10T06:31:10,255 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38361 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:10,255 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:59890 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data6]'}, localName='127.0.0.1:39575', datanodeUuid='8366e522-dd86-4d42-af39-f2b45cfe085a', xmitsInProgress=0}:Exception transferring block BP-741755570-172.17.0.2-1731220253746:blk_1073741839_1021 to mirror 127.0.0.1:38361 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:10,255 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK], DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]) is bad. 2024-11-10T06:31:10,255 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741839_1021 2024-11-10T06:31:10,255 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:59890 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-10T06:31:10,255 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:59890 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:39575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59890 dst: /127.0.0.1:39575 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:10,258 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK] 2024-11-10T06:31:10,261 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T06:31:10,261 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:10,261 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK], DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]) is bad. 2024-11-10T06:31:10,261 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741840_1022 2024-11-10T06:31:10,262 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK] 2024-11-10T06:31:10,264 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37021 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:10,264 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:59892 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data6]'}, localName='127.0.0.1:39575', datanodeUuid='8366e522-dd86-4d42-af39-f2b45cfe085a', xmitsInProgress=0}:Exception transferring block BP-741755570-172.17.0.2-1731220253746:blk_1073741841_1023 to mirror 127.0.0.1:37021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:10,264 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK], DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]) is bad. 2024-11-10T06:31:10,264 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741841_1023 2024-11-10T06:31:10,264 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:59892 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-10T06:31:10,264 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:59892 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:39575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59892 dst: /127.0.0.1:39575 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:10,265 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK] 2024-11-10T06:31:10,268 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:10,269 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:10,269 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:10,269 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:10,269 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:10,269 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220266238 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220270250 2024-11-10T06:31:10,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39575 is added to blk_1073741838_1020 (size=3600) 2024-11-10T06:31:10,272 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34193:34193),(127.0.0.1/127.0.0.1:41513:41513)] 2024-11-10T06:31:10,272 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 is not closed yet, will try archiving it next time 2024-11-10T06:31:10,272 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220266238 is not closed yet, will try archiving it next time 2024-11-10T06:31:10,581 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:10,672 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 is not closed yet, will try archiving it next time 2024-11-10T06:31:11,513 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:12,265 WARN [ResponseProcessor for block BP-741755570-172.17.0.2-1731220253746:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-741755570-172.17.0.2-1731220253746:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:12,265 WARN [DataStreamer for file /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220270250 block BP-741755570-172.17.0.2-1731220253746:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK], DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]) is bad. 2024-11-10T06:31:12,266 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:59908 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:39575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59908 dst: /127.0.0.1:39575 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:12,266 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40034 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:39305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40034 dst: /127.0.0.1:39305 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T06:31:12,267 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ab7c419{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:12,267 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35f8237e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:31:12,267 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:31:12,268 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2378f0ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:31:12,268 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48400c5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir/,STOPPED} 2024-11-10T06:31:12,269 WARN [BP-741755570-172.17.0.2-1731220253746 heartbeating to localhost/127.0.0.1:40625 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:31:12,269 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-10T06:31:12,269 WARN [BP-741755570-172.17.0.2-1731220253746 heartbeating to localhost/127.0.0.1:40625 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-741755570-172.17.0.2-1731220253746 (Datanode Uuid 8366e522-dd86-4d42-af39-f2b45cfe085a) service to localhost/127.0.0.1:40625 2024-11-10T06:31:12,269 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:31:12,270 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data5/current/BP-741755570-172.17.0.2-1731220253746 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:12,270 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data6/current/BP-741755570-172.17.0.2-1731220253746 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:12,270 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:31:12,272 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:12,273 WARN [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]] 2024-11-10T06:31:12,273 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 4999977c7e1b%2C45891%2C1731220254499:(num 1731220270250) roll requested 2024-11-10T06:31:12,273 INFO [regionserver/4999977c7e1b:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C45891%2C1731220254499.1731220272273 2024-11-10T06:31:12,276 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:12,276 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK], DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]) is bad. 
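Editor's note: "HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL" (here and at 06:31:10,250 above) is the log roller comparing the surviving write pipeline against a minimum tolerable replication. The sketch below shows only the shape of that comparison in plain Java; it is not the FSHLog code, and the threshold is a stand-in for whatever setting drives it.

```java
import java.util.Arrays;

/** Standalone sketch of the low-replication check that triggers the WAL roll requests in this log. */
public class LowReplicationRollCheck {

    /** Request a roll when the live pipeline has fewer datanodes than we are willing to tolerate. */
    static boolean shouldRoll(String[] currentPipeline, int minTolerableReplication) {
        return currentPipeline.length < minTolerableReplication;
    }

    public static void main(String[] args) {
        // Same numbers as the log entry: one surviving replica, minimum of two.
        String[] pipeline = { "127.0.0.1:39305" };
        int minTolerable = 2;
        if (shouldRoll(pipeline, minTolerable)) {
            System.out.printf("Found %d replicas but expecting no less than %d; requesting roll of %s%n",
                    pipeline.length, minTolerable, Arrays.toString(pipeline));
        }
    }
}
```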
2024-11-10T06:31:12,276 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741843_1026 2024-11-10T06:31:12,277 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK] 2024-11-10T06:31:12,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45891 {}] regionserver.HRegion(8855): Flush requested on 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:31:12,279 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 87051d5bfe1f8458f4aca6a150270b03 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T06:31:12,279 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40058 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data8]'}, localName='127.0.0.1:39305', datanodeUuid='b468639d-7b11-45e5-9a2a-01e054eead7f', xmitsInProgress=0}:Exception transferring block BP-741755570-172.17.0.2-1731220253746:blk_1073741844_1027 to mirror 127.0.0.1:38361 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:12,279 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38361 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
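Editor's note: the "Flush requested on 87051d5bfe1f8458f4aca6a150270b03" / "Flushing … dataSize=7.36 KB heapSize=8.13 KB" entries at the start of this stretch show the region dumping its memstore to an HFile while the WAL churn continues. The log does not say which condition requested the flush, so the sketch below only shows the generic size-threshold form of such a check; the 128 MB figure and field names are assumptions, not values read from this test's configuration.

```java
/** Generic shape of a size-based memstore flush check; the threshold here is an assumed default. */
public class MemstoreFlushCheck {

    static final long FLUSH_SIZE_BYTES = 128L * 1024 * 1024; // assumed hbase.hregion.memstore.flush.size

    /** Request a flush once the region's memstore data size crosses the configured limit. */
    static boolean shouldFlush(long memstoreDataSizeBytes) {
        return memstoreDataSizeBytes >= FLUSH_SIZE_BYTES;
    }

    public static void main(String[] args) {
        long dataSize = 7 * 1024 + 370; // ~7.36 KB, as reported for region 87051d5bfe1f8458f4aca6a150270b03
        // false at ~7 KB, so if the assumed default applied, the request above came from another trigger.
        System.out.println("flush by size? " + shouldFlush(dataSize));
    }
}
```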
2024-11-10T06:31:12,280 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40058 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-10T06:31:12,280 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK], DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]) is bad. 2024-11-10T06:31:12,280 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741844_1027 2024-11-10T06:31:12,280 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40058 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:39305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40058 dst: /127.0.0.1:39305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:12,280 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK] 2024-11-10T06:31:12,282 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T06:31:12,282 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK], DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]) is bad. 2024-11-10T06:31:12,282 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741845_1028 2024-11-10T06:31:12,283 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK] 2024-11-10T06:31:12,286 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38159 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:12,286 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40068 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data8]'}, localName='127.0.0.1:39305', datanodeUuid='b468639d-7b11-45e5-9a2a-01e054eead7f', xmitsInProgress=0}:Exception transferring block BP-741755570-172.17.0.2-1731220253746:blk_1073741846_1029 to mirror 127.0.0.1:38159 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T06:31:12,286 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK], DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]) is bad. 2024-11-10T06:31:12,286 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741846_1029 2024-11-10T06:31:12,286 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40068 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-10T06:31:12,286 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40068 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:39305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40068 dst: /127.0.0.1:39305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
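Editor's note: the repeating "Exception in createBlockOutputStream" / "Abandoning blk_…" / "Excluding datanode …" cycle above is the client working through candidate pipelines: each node that refuses the connection or returns a bad ack is excluded, the block is abandoned, and a fresh block is requested from the remaining nodes. A self-contained sketch of that loop follows; allocatePipeline and tryWrite are hypothetical placeholders, not HDFS client APIs, and the addresses are copied from the pipelines above purely for illustration.

```java
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/** Sketch of the abandon-and-exclude retry loop behind the repeated pipeline failures in this log. */
public class ExcludeBadDatanodeSketch {

    static final List<String> LIVE_NODES =
            List.of("127.0.0.1:39575", "127.0.0.1:38361", "127.0.0.1:37021", "127.0.0.1:39305");

    /** Hypothetical allocator: pick up to `replication` nodes that are not excluded. */
    static List<String> allocatePipeline(Set<String> excluded, int replication) {
        List<String> pipeline = new ArrayList<>();
        for (String node : LIVE_NODES) {
            if (!excluded.contains(node) && pipeline.size() < replication) {
                pipeline.add(node);
            }
        }
        return pipeline;
    }

    /** Hypothetical probe: in this sketch only 127.0.0.1:39305 still accepts writes. */
    static String tryWrite(List<String> pipeline) {
        for (String node : pipeline) {
            if (!node.equals("127.0.0.1:39305")) {
                return node; // first failing node, as in "ack with firstBadLink" above
            }
        }
        return null; // the whole pipeline accepted the block
    }

    public static void main(String[] args) {
        Set<String> excluded = new HashSet<>();
        for (int attempt = 0; attempt < LIVE_NODES.size(); attempt++) {
            List<String> pipeline = allocatePipeline(excluded, 2);
            if (pipeline.isEmpty()) {
                System.out.println("no datanodes left to try");
                return;
            }
            String badNode = tryWrite(pipeline);
            if (badNode == null) {
                System.out.println("block written via " + pipeline);
                return;
            }
            System.out.println("Abandoning block, excluding datanode " + badNode);
            excluded.add(badNode);
        }
        System.out.println("giving up after " + LIVE_NODES.size() + " attempts");
    }
}
```

In this sketch the loop ends with a single-node pipeline, which matches the WALs above ending up on one surviving replica.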
2024-11-10T06:31:12,287 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK] 2024-11-10T06:31:12,287 WARN [IPC Server handler 0 on default port 40625 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T06:31:12,288 WARN [IPC Server handler 0 on default port 40625 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T06:31:12,288 WARN [IPC Server handler 0 on default port 40625 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T06:31:12,291 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:12,291 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:12,291 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:12,291 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:12,291 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:12,291 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220270250 with entries=10, filesize=10.67 KB; new WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220272273 2024-11-10T06:31:12,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741842_1025 (size=10937) 2024-11-10T06:31:12,299 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41513:41513)] 2024-11-10T06:31:12,299 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 is not closed yet, will try archiving it next time 2024-11-10T06:31:12,299 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220270250 is not closed yet, will try archiving it next time 2024-11-10T06:31:12,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/febb924d3ccf47988a3a97f420f847da is 1080, key is row0002/info:/1731220268257/Put/seqid=0 2024-11-10T06:31:12,308 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:12,308 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK], DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]) is bad. 2024-11-10T06:31:12,308 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741848_1031 2024-11-10T06:31:12,309 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK] 2024-11-10T06:31:12,311 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38159 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:12,311 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK], DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]) is bad. 
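Editor's note: the NameNode-side counterpart appears a little further up: "Failed to place enough replicas, still in need of 1 to reach 2" and "expected size is 1 but only 0 storage types can be selected". With most datanodes stopped or excluded, only one of the two requested replicas can be placed. The sketch below reproduces just that arithmetic; it is not BlockPlacementPolicyDefault, and the node list is illustrative.

```java
import java.util.List;
import java.util.Set;

/** Arithmetic behind "still in need of 1 to reach 2" in the placement warnings above. */
public class ReplicaShortfallSketch {

    static int additionalReplicasNeeded(int requiredReplication, List<String> liveNodes,
                                        Set<String> excludedNodes) {
        long usable = liveNodes.stream().filter(n -> !excludedNodes.contains(n)).count();
        return (int) Math.max(0, requiredReplication - usable);
    }

    public static void main(String[] args) {
        List<String> live = List.of("127.0.0.1:39575", "127.0.0.1:38361",
                                    "127.0.0.1:37021", "127.0.0.1:39305");
        // Three of the four datanodes are stopped or excluded by the client, as logged above.
        Set<String> excluded = Set.of("127.0.0.1:39575", "127.0.0.1:38361", "127.0.0.1:37021");
        int missing = additionalReplicasNeeded(2, live, excluded);
        System.out.println("still in need of " + missing + " to reach 2"); // prints 1
    }
}
```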
2024-11-10T06:31:12,311 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40094 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data8]'}, localName='127.0.0.1:39305', datanodeUuid='b468639d-7b11-45e5-9a2a-01e054eead7f', xmitsInProgress=0}:Exception transferring block BP-741755570-172.17.0.2-1731220253746:blk_1073741849_1032 to mirror 127.0.0.1:38159 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:12,311 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741849_1032 2024-11-10T06:31:12,311 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40094 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T06:31:12,311 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40094 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:39305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40094 dst: /127.0.0.1:39305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:12,312 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK] 2024-11-10T06:31:12,313 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:12,313 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK], DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]) is bad. 2024-11-10T06:31:12,313 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741850_1033 2024-11-10T06:31:12,313 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK] 2024-11-10T06:31:12,315 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37021 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T06:31:12,315 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40106 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data8]'}, localName='127.0.0.1:39305', datanodeUuid='b468639d-7b11-45e5-9a2a-01e054eead7f', xmitsInProgress=0}:Exception transferring block BP-741755570-172.17.0.2-1731220253746:blk_1073741851_1034 to mirror 127.0.0.1:37021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:12,316 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK], DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]) is bad. 2024-11-10T06:31:12,316 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741851_1034 2024-11-10T06:31:12,316 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40106 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T06:31:12,316 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40106 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:39305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40106 dst: /127.0.0.1:39305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:12,316 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK] 2024-11-10T06:31:12,317 WARN [IPC Server handler 2 on default port 40625 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T06:31:12,317 WARN [IPC Server handler 2 on default port 40625 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T06:31:12,317 WARN [IPC Server handler 2 on default port 40625 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T06:31:12,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741852_1035 (size=10347) 2024-11-10T06:31:12,581 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
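Editor's note: the repeating Abandoning/Excluding sequence in the entries above is the client-side recovery pattern: when a datanode in the write pipeline refuses the connection (or acks with a firstBadLink), the streamer abandons the allocated block, adds the bad node to an exclude set, and asks the NameNode for a fresh block, until no eligible datanode is left and the write aborts with "All datanodes [...] are bad." A simplified, self-contained illustration of that loop, not the actual DataStreamer code; Namenode, Writer, allocateBlock and firstBadNode are hypothetical stand-ins:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Simplified illustration of the abandon/exclude/retry behaviour visible in
// the hdfs.DataStreamer log lines. Not HDFS source; the types are stand-ins.
final class PipelineRetrySketch {
    interface Namenode { List<String> allocateBlock(Set<String> excludedNodes); }
    interface Writer   { int firstBadNode(List<String> pipeline); } // -1 means success

    static boolean writeWithRecovery(Namenode nn, Writer writer, int maxAttempts) {
        Set<String> excluded = new HashSet<>();
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            List<String> pipeline = nn.allocateBlock(excluded); // new blk_..._... allocation
            if (pipeline.isEmpty()) {
                return false; // "All datanodes [...] are bad. Aborting..."
            }
            int bad = writer.firstBadNode(pipeline);
            if (bad < 0) {
                return true;  // pipeline established, streaming can continue
            }
            excluded.add(pipeline.get(bad)); // "Abandoning ..." then "Excluding datanode ..."
        }
        return false;
    }
}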
2024-11-10T06:31:12,694 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 is not closed yet, will try archiving it next time 2024-11-10T06:31:12,721 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/febb924d3ccf47988a3a97f420f847da 2024-11-10T06:31:12,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/febb924d3ccf47988a3a97f420f847da as hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/febb924d3ccf47988a3a97f420f847da 2024-11-10T06:31:12,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/febb924d3ccf47988a3a97f420f847da, entries=5, sequenceid=11, filesize=10.1 K 2024-11-10T06:31:12,735 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 87051d5bfe1f8458f4aca6a150270b03 in 456ms, sequenceid=11, compaction requested=false 2024-11-10T06:31:12,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 87051d5bfe1f8458f4aca6a150270b03: 2024-11-10T06:31:12,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45891 {}] regionserver.HRegion(8855): Flush requested on 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:31:12,909 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 87051d5bfe1f8458f4aca6a150270b03 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-10T06:31:12,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/558e37bb2c7f4126beb1bc1fdefa744f is 1080, key is row0007/info:/1731220272281/Put/seqid=0 2024-11-10T06:31:12,916 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:12,916 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK], DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]) is bad. 2024-11-10T06:31:12,916 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741853_1036 2024-11-10T06:31:12,917 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK] 2024-11-10T06:31:12,919 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38159 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:12,919 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40126 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data8]'}, localName='127.0.0.1:39305', datanodeUuid='b468639d-7b11-45e5-9a2a-01e054eead7f', xmitsInProgress=0}:Exception transferring block BP-741755570-172.17.0.2-1731220253746:blk_1073741854_1037 to mirror 127.0.0.1:38159 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:12,919 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK], DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]) is bad. 2024-11-10T06:31:12,919 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40126 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T06:31:12,919 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741854_1037 2024-11-10T06:31:12,919 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40126 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:39305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40126 dst: /127.0.0.1:39305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:12,920 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK] 2024-11-10T06:31:12,921 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:12,921 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK], DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]) is bad. 2024-11-10T06:31:12,921 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741855_1038 2024-11-10T06:31:12,922 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK] 2024-11-10T06:31:12,923 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39575 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:12,923 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40128 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data8]'}, localName='127.0.0.1:39305', datanodeUuid='b468639d-7b11-45e5-9a2a-01e054eead7f', xmitsInProgress=0}:Exception transferring block BP-741755570-172.17.0.2-1731220253746:blk_1073741856_1039 to mirror 127.0.0.1:39575 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:12,924 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK], DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]) is bad. 2024-11-10T06:31:12,924 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741856_1039 2024-11-10T06:31:12,924 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40128 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T06:31:12,924 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40128 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:39305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40128 dst: /127.0.0.1:39305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
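Editor's note: the flush logged a little earlier (06:31:12,721 through 06:31:12,735) writes the new HFile under the region's .tmp directory and only then commits it into the info store directory, so a partially written file is never visible to readers. A minimal sketch of that commit step with the Hadoop FileSystem API, using the two paths from the log; the HFile write itself and error handling are omitted:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the commit step the MemStoreFlusher logs: the flushed HFile is
// renamed from the region's .tmp directory into the store directory.
public final class CommitTmpHFile {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path tmp = new Path("hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/febb924d3ccf47988a3a97f420f847da");
        Path dest = new Path("hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/febb924d3ccf47988a3a97f420f847da");
        FileSystem fs = tmp.getFileSystem(conf);
        if (!fs.rename(tmp, dest)) { // a rename within one HDFS namespace is atomic
            throw new IOException("Failed to commit " + tmp + " as " + dest);
        }
    }
}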
2024-11-10T06:31:12,924 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK] 2024-11-10T06:31:12,925 WARN [IPC Server handler 1 on default port 40625 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T06:31:12,925 WARN [IPC Server handler 1 on default port 40625 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T06:31:12,925 WARN [IPC Server handler 1 on default port 40625 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T06:31:12,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741857_1040 (size=12506) 2024-11-10T06:31:13,328 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/558e37bb2c7f4126beb1bc1fdefa744f 2024-11-10T06:31:13,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/558e37bb2c7f4126beb1bc1fdefa744f as hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/558e37bb2c7f4126beb1bc1fdefa744f 2024-11-10T06:31:13,341 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/558e37bb2c7f4126beb1bc1fdefa744f, entries=7, sequenceid=24, filesize=12.2 K 2024-11-10T06:31:13,343 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 87051d5bfe1f8458f4aca6a150270b03 in 433ms, sequenceid=24, compaction requested=false 2024-11-10T06:31:13,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 87051d5bfe1f8458f4aca6a150270b03: 2024-11-10T06:31:13,343 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-10T06:31:13,343 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:31:13,343 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/558e37bb2c7f4126beb1bc1fdefa744f because midkey is the same as first or last row 2024-11-10T06:31:13,514 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:14,299 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:14,299 WARN [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]] 2024-11-10T06:31:14,299 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 4999977c7e1b%2C45891%2C1731220254499:(num 1731220272273) roll requested 2024-11-10T06:31:14,300 INFO [regionserver/4999977c7e1b:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C45891%2C1731220254499.1731220274299 2024-11-10T06:31:14,303 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:14,303 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK], DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]) is bad. 2024-11-10T06:31:14,303 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741858_1041 2024-11-10T06:31:14,303 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK] 2024-11-10T06:31:14,306 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38159 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:14,306 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40148 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data8]'}, localName='127.0.0.1:39305', datanodeUuid='b468639d-7b11-45e5-9a2a-01e054eead7f', xmitsInProgress=0}:Exception transferring block BP-741755570-172.17.0.2-1731220253746:blk_1073741859_1042 to mirror 127.0.0.1:38159 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:14,306 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK], DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]) is bad. 2024-11-10T06:31:14,306 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741859_1042 2024-11-10T06:31:14,306 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40148 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-10T06:31:14,306 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40148 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:39305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40148 dst: /127.0.0.1:39305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:14,306 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK] 2024-11-10T06:31:14,307 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:14,308 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK], DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]) is bad. 2024-11-10T06:31:14,308 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741860_1043 2024-11-10T06:31:14,308 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK] 2024-11-10T06:31:14,310 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37021 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:14,310 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40162 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data8]'}, localName='127.0.0.1:39305', datanodeUuid='b468639d-7b11-45e5-9a2a-01e054eead7f', xmitsInProgress=0}:Exception transferring block BP-741755570-172.17.0.2-1731220253746:blk_1073741861_1044 to mirror 127.0.0.1:37021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:14,310 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK], DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]) is bad. 2024-11-10T06:31:14,310 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741861_1044 2024-11-10T06:31:14,310 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40162 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-10T06:31:14,310 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40162 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:39305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40162 dst: /127.0.0.1:39305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
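Editor's note: the roll logged just below at 06:31:14,315 is driven by the replica check at 06:31:14,299 above: the WAL writer inspects its current output pipeline and, when the number of live replicas drops below the expected minimum, requests that the WAL be closed and reopened on a fresh pipeline. A simplified sketch of that check, offered as an illustration of the logged behaviour rather than the FSHLog source; shouldRequestRoll and minReplicas are made-up names:

import java.util.List;

// Illustration of the check behind "Found 1 replicas but expecting no less
// than 2 replicas. Requesting close of WAL."  Names are hypothetical.
final class LowReplicaRollSketch {
    static boolean shouldRequestRoll(List<String> currentPipeline, int minReplicas) {
        int found = currentPipeline.size();
        if (found < minReplicas) {
            System.out.printf(
                "Found %d replicas but expecting no less than %d replicas. Requesting close of WAL.%n",
                found, minReplicas);
            return true; // the roller then opens a new writer on a fresh pipeline
        }
        return false;
    }
}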
2024-11-10T06:31:14,311 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK] 2024-11-10T06:31:14,311 WARN [IPC Server handler 0 on default port 40625 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T06:31:14,312 WARN [IPC Server handler 0 on default port 40625 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T06:31:14,312 WARN [IPC Server handler 0 on default port 40625 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T06:31:14,314 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:14,314 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:14,314 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:14,314 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:14,315 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:14,315 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220272273 with entries=14, filesize=13.64 KB; new WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220274299 2024-11-10T06:31:14,316 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41513:41513)] 2024-11-10T06:31:14,316 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 is not closed yet, will try archiving it next time 2024-11-10T06:31:14,316 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220272273 is not closed yet, will try archiving it next time 2024-11-10T06:31:14,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741847_1030 (size=13979) 2024-11-10T06:31:14,320 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220266238 to hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/oldWALs/4999977c7e1b%2C45891%2C1731220254499.1731220266238 2024-11-10T06:31:14,321 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220270250 to hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/oldWALs/4999977c7e1b%2C45891%2C1731220254499.1731220270250 2024-11-10T06:31:14,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45891 {}] regionserver.HRegion(8855): Flush requested on 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:31:14,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 87051d5bfe1f8458f4aca6a150270b03 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-10T06:31:14,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/7f4756a39131478c810819578a59c3d1 is 1079, key is tmprow/info:/1731220274328/Put/seqid=0 2024-11-10T06:31:14,335 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37021 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:14,335 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40180 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data8]'}, localName='127.0.0.1:39305', datanodeUuid='b468639d-7b11-45e5-9a2a-01e054eead7f', xmitsInProgress=0}:Exception transferring block BP-741755570-172.17.0.2-1731220253746:blk_1073741863_1046 to mirror 127.0.0.1:37021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:14,335 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK], DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]) is bad. 2024-11-10T06:31:14,335 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741863_1046 2024-11-10T06:31:14,335 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40180 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T06:31:14,336 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40180 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:39305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40180 dst: /127.0.0.1:39305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:14,336 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK] 2024-11-10T06:31:14,337 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:14,337 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK], DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]) is bad. 2024-11-10T06:31:14,337 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741864_1047 2024-11-10T06:31:14,338 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK] 2024-11-10T06:31:14,339 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:14,339 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK], DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]) is bad. 2024-11-10T06:31:14,339 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741865_1048 2024-11-10T06:31:14,339 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK] 2024-11-10T06:31:14,340 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:14,340 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK], DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]) is bad. 2024-11-10T06:31:14,340 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741866_1049 2024-11-10T06:31:14,341 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK] 2024-11-10T06:31:14,341 WARN [IPC Server handler 4 on default port 40625 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T06:31:14,341 WARN [IPC Server handler 4 on default port 40625 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T06:31:14,341 WARN [IPC Server handler 4 on default port 40625 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T06:31:14,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741867_1050 (size=6027) 2024-11-10T06:31:14,581 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:14,717 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 is not closed yet, will try archiving it next time 2024-11-10T06:31:14,745 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/7f4756a39131478c810819578a59c3d1 2024-11-10T06:31:14,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/7f4756a39131478c810819578a59c3d1 as hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/7f4756a39131478c810819578a59c3d1 2024-11-10T06:31:14,758 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/7f4756a39131478c810819578a59c3d1, entries=1, sequenceid=34, filesize=5.9 K 2024-11-10T06:31:14,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 87051d5bfe1f8458f4aca6a150270b03 in 431ms, sequenceid=34, compaction requested=true 2024-11-10T06:31:14,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 87051d5bfe1f8458f4aca6a150270b03: 2024-11-10T06:31:14,759 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-10T06:31:14,759 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:31:14,759 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/558e37bb2c7f4126beb1bc1fdefa744f because midkey is the same as first or last row 2024-11-10T06:31:14,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 87051d5bfe1f8458f4aca6a150270b03:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T06:31:14,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:31:14,760 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting 
compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T06:31:14,761 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T06:31:14,761 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.HStore(1541): 87051d5bfe1f8458f4aca6a150270b03/info is initiating minor compaction (all files) 2024-11-10T06:31:14,761 INFO [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 87051d5bfe1f8458f4aca6a150270b03/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. 2024-11-10T06:31:14,761 INFO [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/febb924d3ccf47988a3a97f420f847da, hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/558e37bb2c7f4126beb1bc1fdefa744f, hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/7f4756a39131478c810819578a59c3d1] into tmpdir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp, totalSize=28.2 K 2024-11-10T06:31:14,762 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] compactions.Compactor(225): Compacting febb924d3ccf47988a3a97f420f847da, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731220268257 2024-11-10T06:31:14,762 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] compactions.Compactor(225): Compacting 558e37bb2c7f4126beb1bc1fdefa744f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731220272281 2024-11-10T06:31:14,763 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7f4756a39131478c810819578a59c3d1, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731220274328 2024-11-10T06:31:14,775 INFO [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 87051d5bfe1f8458f4aca6a150270b03#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T06:31:14,776 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/f938228b21b146119d1a1107a7ee3749 is 1080, key is row0002/info:/1731220268257/Put/seqid=0 2024-11-10T06:31:14,778 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:14,778 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK], DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]) is bad. 2024-11-10T06:31:14,778 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741868_1051 2024-11-10T06:31:14,778 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK] 2024-11-10T06:31:14,781 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39575 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:14,781 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40206 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data8]'}, localName='127.0.0.1:39305', datanodeUuid='b468639d-7b11-45e5-9a2a-01e054eead7f', xmitsInProgress=0}:Exception transferring block BP-741755570-172.17.0.2-1731220253746:blk_1073741869_1052 to mirror 127.0.0.1:39575 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:14,781 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK], DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]) is bad. 2024-11-10T06:31:14,781 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741869_1052 2024-11-10T06:31:14,781 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40206 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T06:31:14,781 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40206 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:39305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40206 dst: /127.0.0.1:39305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:14,782 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK] 2024-11-10T06:31:14,783 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:14,783 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK], DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]) is bad. 2024-11-10T06:31:14,783 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741870_1053 2024-11-10T06:31:14,784 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK] 2024-11-10T06:31:14,786 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38159 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:14,786 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40216 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data8]'}, localName='127.0.0.1:39305', datanodeUuid='b468639d-7b11-45e5-9a2a-01e054eead7f', xmitsInProgress=0}:Exception transferring block BP-741755570-172.17.0.2-1731220253746:blk_1073741871_1054 to mirror 127.0.0.1:38159 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:14,786 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK], DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]) is bad. 2024-11-10T06:31:14,786 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741871_1054 2024-11-10T06:31:14,786 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40216 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T06:31:14,786 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40216 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:39305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40216 dst: /127.0.0.1:39305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T06:31:14,787 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK] 2024-11-10T06:31:14,787 WARN [IPC Server handler 3 on default port 40625 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T06:31:14,787 WARN [IPC Server handler 3 on default port 40625 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T06:31:14,787 WARN [IPC Server handler 3 on default port 40625 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T06:31:14,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741872_1055 (size=17994) 2024-11-10T06:31:15,056 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4c84b298[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39305, datanodeUuid=b468639d-7b11-45e5-9a2a-01e054eead7f, infoPort=41513, infoSecurePort=0, ipcPort=33607, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746):Failed to transfer BP-741755570-172.17.0.2-1731220253746:blk_1073741852_1035 to 127.0.0.1:38361 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
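The Thread-947/Thread-954 sequences above all follow one client-side pattern: when pipeline setup to a chosen datanode fails ("Connection refused" or "ack with firstBadLink"), the writer abandons the allocated block, adds the unreachable node to an exclusion list, and asks the NameNode for a new block, until no eligible node is left and the NameNode logs "Failed to place enough replicas, still in need of 1 to reach 2". The following standalone Java sketch only mirrors that abandon/exclude/retry loop under stated assumptions; allocateTarget and openPipeline are hypothetical stand-ins, not Hadoop APIs, and the node addresses are copied from the log purely for illustration.

import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * Illustrative sketch (not Hadoop code) of the abandon/exclude/retry loop
 * visible in the DataStreamer log lines above.
 */
public class PipelineRetrySketch {

    // Datanodes that refuse connections in the scenario logged above.
    private static final Set<String> DEAD = Set.of(
        "127.0.0.1:37021", "127.0.0.1:39575", "127.0.0.1:38361", "127.0.0.1:38159");

    // Hypothetical stand-in for asking the NameNode for a new block target,
    // honouring the client's exclusion list.
    static String allocateTarget(List<String> cluster, Set<String> excluded) throws IOException {
        return cluster.stream()
            .filter(n -> !excluded.contains(n))
            .findFirst()
            .orElseThrow(() -> new IOException(
                "Failed to place enough replicas, still in need of 1"));
    }

    // Hypothetical stand-in for createBlockOutputStream(): succeeds only if
    // the chosen datanode is reachable.
    static void openPipeline(String target) throws IOException {
        if (DEAD.contains(target)) {
            throw new IOException("Connection refused: " + target);
        }
    }

    public static void main(String[] args) throws IOException {
        List<String> cluster = List.of(
            "127.0.0.1:37021", "127.0.0.1:39575", "127.0.0.1:38361",
            "127.0.0.1:38159", "127.0.0.1:39305");
        Set<String> excluded = new HashSet<>();

        while (true) {
            String target = allocateTarget(cluster, excluded);
            try {
                openPipeline(target);
                System.out.println("Pipeline established to " + target);
                return;
            } catch (IOException e) {
                // Mirrors "Abandoning BP-...:blk_..." followed by
                // "Excluding datanode ..." in the log above.
                System.out.println("Abandoning block, excluding " + target + ": " + e.getMessage());
                excluded.add(target);
            }
        }
    }
}

Run as-is, the sketch excludes the four unreachable addresses one by one and finally succeeds against 127.0.0.1:39305, the only datanode still answering in the logged run.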
2024-11-10T06:31:15,056 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@155a9bf4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39305, datanodeUuid=b468639d-7b11-45e5-9a2a-01e054eead7f, infoPort=41513, infoSecurePort=0, ipcPort=33607, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746):Failed to transfer BP-741755570-172.17.0.2-1731220253746:blk_1073741842_1025 to 127.0.0.1:38361 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:15,199 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/f938228b21b146119d1a1107a7ee3749 as hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/f938228b21b146119d1a1107a7ee3749 2024-11-10T06:31:15,206 INFO [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 87051d5bfe1f8458f4aca6a150270b03/info of 87051d5bfe1f8458f4aca6a150270b03 into f938228b21b146119d1a1107a7ee3749(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
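Before and after this point the log repeats the same two-stage split decision: "Should split because region size is big enough sumSize=..., sizeToCheck=16.0 K" followed by "cannot split ... because midkey is the same as first or last row", so the region is never actually split. A minimal sketch of that decision is below; it is not HBase code, the midKey helper is a hypothetical stand-in for the store-file index lookup, and the byte sizes and row keys are taken from the log only as example inputs.

/**
 * Illustrative sketch of the two-stage split check logged above:
 * size threshold first, then a usable-midkey guard.
 */
public class SplitCheckSketch {

    // Hypothetical stand-in for reading the midkey of the largest store file.
    // In the logged scenario the candidate midkey collapses onto the first
    // (or last) row, which is why the split is refused every time.
    static byte[] midKey(byte[] firstRow, byte[] lastRow) {
        return firstRow;
    }

    static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes,
                               byte[] firstRow, byte[] lastRow) {
        if (sumStoreSizeBytes <= sizeToCheckBytes) {
            return false; // region not big enough yet
        }
        byte[] mid = midKey(firstRow, lastRow);
        // "cannot split ... because midkey is the same as first or last row"
        return !java.util.Arrays.equals(mid, firstRow)
            && !java.util.Arrays.equals(mid, lastRow);
    }

    public static void main(String[] args) {
        long sumSize = 17994;          // ~17.6 K compacted store file, as logged
        long sizeToCheck = 16 * 1024;  // 16.0 K, as logged
        byte[] first = "row0002".getBytes();
        byte[] last = "tmprow".getBytes();
        // Prints "split? false": size check passes, midkey guard rejects.
        System.out.println("split? " + shouldSplit(sumSize, sizeToCheck, first, last));
    }
}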
2024-11-10T06:31:15,206 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 87051d5bfe1f8458f4aca6a150270b03: 2024-11-10T06:31:15,206 INFO [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03., storeName=87051d5bfe1f8458f4aca6a150270b03/info, priority=13, startTime=1731220274759; duration=0sec 2024-11-10T06:31:15,207 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-10T06:31:15,207 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:31:15,207 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/f938228b21b146119d1a1107a7ee3749 because midkey is the same as first or last row 2024-11-10T06:31:15,207 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-10T06:31:15,207 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:31:15,207 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/f938228b21b146119d1a1107a7ee3749 because midkey is the same as first or last row 2024-11-10T06:31:15,207 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-10T06:31:15,207 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:31:15,207 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/f938228b21b146119d1a1107a7ee3749 because midkey is the same as first or last row 2024-11-10T06:31:15,207 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:31:15,207 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 87051d5bfe1f8458f4aca6a150270b03:info 2024-11-10T06:31:15,514 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:15,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45891 {}] regionserver.HRegion(8855): Flush requested on 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:31:15,748 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 87051d5bfe1f8458f4aca6a150270b03 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-10T06:31:15,753 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/0233096ddff14d3ea06b690732e3d6b7 is 1079, key is tmprow/info:/1731220275747/Put/seqid=0 2024-11-10T06:31:15,754 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:15,755 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK], DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]) is bad. 2024-11-10T06:31:15,755 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741873_1056 2024-11-10T06:31:15,755 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK] 2024-11-10T06:31:15,756 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:15,756 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK], DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]) is bad. 2024-11-10T06:31:15,756 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741874_1057 2024-11-10T06:31:15,757 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37021,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK] 2024-11-10T06:31:15,758 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:15,758 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK], DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK]) is bad. 2024-11-10T06:31:15,758 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741875_1058 2024-11-10T06:31:15,758 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38159,DS-9d42829a-de72-477c-9039-78aa7a78ff04,DISK] 2024-11-10T06:31:15,760 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38361 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:15,760 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40236 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data8]'}, localName='127.0.0.1:39305', datanodeUuid='b468639d-7b11-45e5-9a2a-01e054eead7f', xmitsInProgress=0}:Exception transferring block BP-741755570-172.17.0.2-1731220253746:blk_1073741876_1059 to mirror 127.0.0.1:38361 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:15,761 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK], DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]) is bad. 2024-11-10T06:31:15,761 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741876_1059 2024-11-10T06:31:15,761 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40236 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T06:31:15,761 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:40236 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:39305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40236 dst: /127.0.0.1:39305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:15,761 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK] 2024-11-10T06:31:15,762 WARN [IPC Server handler 2 on default port 40625 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T06:31:15,762 WARN [IPC Server handler 2 on default port 40625 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T06:31:15,762 WARN [IPC Server handler 2 on default port 40625 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T06:31:15,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741877_1060 (size=6027) 2024-11-10T06:31:16,045 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@155a9bf4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39305, datanodeUuid=b468639d-7b11-45e5-9a2a-01e054eead7f, infoPort=41513, infoSecurePort=0, ipcPort=33607, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746):Failed to transfer BP-741755570-172.17.0.2-1731220253746:blk_1073741857_1040 to 127.0.0.1:38159 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:16,045 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4c84b298[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39305, datanodeUuid=b468639d-7b11-45e5-9a2a-01e054eead7f, infoPort=41513, infoSecurePort=0, ipcPort=33607, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746):Failed to transfer BP-741755570-172.17.0.2-1731220253746:blk_1073741847_1030 to 127.0.0.1:37021 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T06:31:16,166 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/0233096ddff14d3ea06b690732e3d6b7 2024-11-10T06:31:16,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/0233096ddff14d3ea06b690732e3d6b7 as hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/0233096ddff14d3ea06b690732e3d6b7 2024-11-10T06:31:16,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/0233096ddff14d3ea06b690732e3d6b7, entries=1, sequenceid=45, filesize=5.9 K 2024-11-10T06:31:16,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 87051d5bfe1f8458f4aca6a150270b03 in 431ms, sequenceid=45, compaction requested=false 2024-11-10T06:31:16,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 87051d5bfe1f8458f4aca6a150270b03: 2024-11-10T06:31:16,180 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-10T06:31:16,180 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:31:16,180 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/f938228b21b146119d1a1107a7ee3749 because midkey is the same as first or last row 2024-11-10T06:31:16,320 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:16,320 WARN [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-10T06:31:16,364 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:31:16,367 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:31:16,368 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:31:16,368 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:31:16,368 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T06:31:16,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@272904a9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:31:16,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25b5f6a5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:31:16,482 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@601efdaa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/java.io.tmpdir/jetty-localhost-37805-hadoop-hdfs-3_4_1-tests_jar-_-any-15006497306612892706/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:16,483 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c8b9f54{HTTP/1.1, (http/1.1)}{localhost:37805} 2024-11-10T06:31:16,483 INFO [Time-limited test {}] server.Server(415): Started @126184ms 2024-11-10T06:31:16,484 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T06:31:16,579 WARN [Thread-983 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T06:31:16,582 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
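The recurring "All datanodes [...] are bad. Aborting..." entries come from the WAL roller retrying the roll against the same dead pipeline, and after enough consecutive failures it emits the "Too many consecutive RollWriter requests" hint seen a few records earlier. The sketch below only illustrates that consecutive-failure counting; the threshold of 3 is an assumed value for the sketch, not the real HBase constant or configuration.

/**
 * Illustrative sketch of the consecutive-roll-failure warning logged by the
 * WAL roller above.
 */
public class RollWarnSketch {

    private int consecutiveFailedRolls = 0;
    private static final int WARN_THRESHOLD = 3; // assumption for illustration

    // Hypothetical stand-in for reporting the outcome of one roll attempt.
    void onRollResult(boolean pipelineHealthy) {
        if (pipelineHealthy) {
            consecutiveFailedRolls = 0;
            return;
        }
        consecutiveFailedRolls++;
        System.out.println("java.io.IOException: All datanodes are bad. Aborting...");
        if (consecutiveFailedRolls >= WARN_THRESHOLD) {
            System.out.println("Too many consecutive RollWriter requests, it's a sign of "
                + "the total number of live datanodes is lower than the tolerable replicas.");
        }
    }

    public static void main(String[] args) {
        RollWarnSketch roller = new RollWarnSketch();
        // Every roll fails while only one live datanode remains, as in the log.
        for (int i = 0; i < 4; i++) {
            roller.onRollResult(false);
        }
    }
}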
2024-11-10T06:31:16,587 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdaafdc47c175dec2 with lease ID 0x9474d1e025b216c4: from storage DS-54e98695-2016-467c-93dc-5fe5d14e27b8 node DatanodeRegistration(127.0.0.1:35019, datanodeUuid=e19f1e86-46e6-4f0d-b75d-d7b0b9a2c8c8, infoPort=39161, infoSecurePort=0, ipcPort=34217, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T06:31:16,587 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdaafdc47c175dec2 with lease ID 0x9474d1e025b216c4: from storage DS-914d8c7b-c2fb-4ad6-9260-57e7c6dc8b45 node DatanodeRegistration(127.0.0.1:35019, datanodeUuid=e19f1e86-46e6-4f0d-b75d-d7b0b9a2c8c8, infoPort=39161, infoSecurePort=0, ipcPort=34217, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:17,514 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:18,045 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@155a9bf4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39305, datanodeUuid=b468639d-7b11-45e5-9a2a-01e054eead7f, infoPort=41513, infoSecurePort=0, ipcPort=33607, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746):Failed to transfer BP-741755570-172.17.0.2-1731220253746:blk_1073741872_1055 to 127.0.0.1:38159 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T06:31:18,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741867_1050 (size=6027) 2024-11-10T06:31:18,321 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:18,582 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:19,045 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4c84b298[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39305, datanodeUuid=b468639d-7b11-45e5-9a2a-01e054eead7f, infoPort=41513, infoSecurePort=0, ipcPort=33607, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746):Failed to transfer BP-741755570-172.17.0.2-1731220253746:blk_1073741877_1060 to 127.0.0.1:38159 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:19,515 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:20,321 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:20,583 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:21,515 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:22,321 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:22,583 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:23,516 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:24,322 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:24,424 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-10T06:31:24,583 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
  at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-10T06:31:24,683 ERROR [FSHLog-0-hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData-prefix:4999977c7e1b,39613,1731220254444 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting...
  at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-10T06:31:24,683 WARN [FSHLog-0-hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData-prefix:4999977c7e1b,39613,1731220254444 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting...
  at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-10T06:31:24,683 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 4999977c7e1b%2C39613%2C1731220254444:(num 1731220254601) roll requested
2024-11-10T06:31:24,683 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C39613%2C1731220254444.1731220284683
2024-11-10T06:31:24,690 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T06:31:24,690 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T06:31:24,690 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T06:31:24,690 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T06:31:24,690 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T06:31:24,690 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/WALs/4999977c7e1b,39613,1731220254444/4999977c7e1b%2C39613%2C1731220254444.1731220254601 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/WALs/4999977c7e1b,39613,1731220254444/4999977c7e1b%2C39613%2C1731220254444.1731220284683
2024-11-10T06:31:24,691 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:24,691 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:24,691 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/WALs/4999977c7e1b,39613,1731220254444/4999977c7e1b%2C39613%2C1731220254444.1731220254601 2024-11-10T06:31:24,691 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41513:41513),(127.0.0.1/127.0.0.1:39161:39161)] 2024-11-10T06:31:24,691 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/WALs/4999977c7e1b,39613,1731220254444/4999977c7e1b%2C39613%2C1731220254444.1731220254601 is not closed yet, will try archiving it next time 2024-11-10T06:31:24,691 WARN [IPC Server handler 2 on default port 40625 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/WALs/4999977c7e1b,39613,1731220254444/4999977c7e1b%2C39613%2C1731220254444.1731220254601 has not been closed. Lease recovery is in progress. RecoveryId = 1062 for block blk_1073741830_1006 2024-11-10T06:31:24,692 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/WALs/4999977c7e1b,39613,1731220254444/4999977c7e1b%2C39613%2C1731220254444.1731220254601 after 1ms 2024-11-10T06:31:25,516 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:26,322 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:26,603 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5f0a58ff {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-741755570-172.17.0.2-1731220253746:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:38361,null,null]) java.net.ConnectException: Call From 4999977c7e1b/172.17.0.2 to localhost:39113 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-10T06:31:26,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741833_1019 (size=455) 2024-11-10T06:31:27,269 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220254898 to hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/oldWALs/4999977c7e1b%2C45891%2C1731220254499.1731220254898 2024-11-10T06:31:27,271 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220272273 to hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/oldWALs/4999977c7e1b%2C45891%2C1731220254499.1731220272273 2024-11-10T06:31:27,516 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T06:31:27,585 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6e9fe2bf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35019, datanodeUuid=e19f1e86-46e6-4f0d-b75d-d7b0b9a2c8c8, infoPort=39161, infoSecurePort=0, ipcPort=34217, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746):Failed to transfer BP-741755570-172.17.0.2-1731220253746:blk_1073741833_1019 to 127.0.0.1:39575 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:28,323 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:28,693 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/WALs/4999977c7e1b,39613,1731220254444/4999977c7e1b%2C39613%2C1731220254444.1731220254601 after 4002ms 2024-11-10T06:31:29,517 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:30,323 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:30,586 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3265dca3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35019, datanodeUuid=e19f1e86-46e6-4f0d-b75d-d7b0b9a2c8c8, infoPort=39161, infoSecurePort=0, ipcPort=34217, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746):Failed to transfer BP-741755570-172.17.0.2-1731220253746:blk_1073741832_1008 to 127.0.0.1:39575 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:30,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741836_1012 (size=76) 2024-11-10T06:31:31,517 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:31,585 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3265dca3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35019, datanodeUuid=e19f1e86-46e6-4f0d-b75d-d7b0b9a2c8c8, infoPort=39161, infoSecurePort=0, ipcPort=34217, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746):Failed to transfer BP-741755570-172.17.0.2-1731220253746:blk_1073741826_1002 to 127.0.0.1:39575 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:31,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741828_1004 (size=1189) 2024-11-10T06:31:31,924 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C45891%2C1731220254499.1731220291923 2024-11-10T06:31:31,926 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:31,927 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741879_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK], DatanodeInfoWithStorage[127.0.0.1:35019,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]) is bad. 
2024-11-10T06:31:31,927 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741879_1063 2024-11-10T06:31:31,927 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK] 2024-11-10T06:31:31,932 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:31,932 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:31,932 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:31,932 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:31,932 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:31,932 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220274299 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220291923 2024-11-10T06:31:31,934 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39161:39161),(127.0.0.1/127.0.0.1:41513:41513)] 2024-11-10T06:31:31,934 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220274299 is not closed yet, will try archiving it next time 2024-11-10T06:31:31,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741862_1045 (size=13591) 2024-11-10T06:31:31,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45891 {}] regionserver.HRegion(8855): Flush requested on 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:31:31,945 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 87051d5bfe1f8458f4aca6a150270b03 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-10T06:31:31,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/b38566f4f6034c16a70714384a8fa2d7 is 1080, key is row0013/info:/1731220291935/Put/seqid=0 2024-11-10T06:31:31,952 WARN [Thread-1023 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:31,952 WARN [Thread-1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741881_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK], DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]) is bad. 2024-11-10T06:31:31,952 WARN [Thread-1023 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741881_1065 2024-11-10T06:31:31,952 WARN [Thread-1023 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK] 2024-11-10T06:31:31,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741882_1066 (size=11421) 2024-11-10T06:31:31,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741882_1066 (size=11421) 2024-11-10T06:31:31,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/b38566f4f6034c16a70714384a8fa2d7 2024-11-10T06:31:31,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/b38566f4f6034c16a70714384a8fa2d7 as hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/b38566f4f6034c16a70714384a8fa2d7 2024-11-10T06:31:31,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/b38566f4f6034c16a70714384a8fa2d7, entries=6, sequenceid=55, filesize=11.2 K 2024-11-10T06:31:31,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 87051d5bfe1f8458f4aca6a150270b03 in 26ms, sequenceid=55, compaction requested=true 2024-11-10T06:31:31,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 87051d5bfe1f8458f4aca6a150270b03: 2024-11-10T06:31:31,971 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-10T06:31:31,971 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:31:31,971 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/f938228b21b146119d1a1107a7ee3749 because midkey is the same as first or last row 
2024-11-10T06:31:31,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 87051d5bfe1f8458f4aca6a150270b03:info, priority=-2147483648, current under compaction store size is 1
2024-11-10T06:31:31,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-10T06:31:31,971 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-10T06:31:31,972 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-10T06:31:31,972 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.HStore(1541): 87051d5bfe1f8458f4aca6a150270b03/info is initiating minor compaction (all files)
2024-11-10T06:31:31,972 INFO [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 87051d5bfe1f8458f4aca6a150270b03/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.
2024-11-10T06:31:31,973 INFO [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/f938228b21b146119d1a1107a7ee3749, hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/0233096ddff14d3ea06b690732e3d6b7, hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/b38566f4f6034c16a70714384a8fa2d7] into tmpdir=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp, totalSize=34.6 K
2024-11-10T06:31:31,973 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] compactions.Compactor(225): Compacting f938228b21b146119d1a1107a7ee3749, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731220268257
2024-11-10T06:31:31,973 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0233096ddff14d3ea06b690732e3d6b7, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731220275747
2024-11-10T06:31:31,974 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] compactions.Compactor(225): Compacting b38566f4f6034c16a70714384a8fa2d7, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731220276153
2024-11-10T06:31:31,990 INFO [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 87051d5bfe1f8458f4aca6a150270b03#info#compaction#24 average throughput is 17.44 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T06:31:31,990 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/5f856c400ef64679a4438a96a70095c2 is 1080, key is row0002/info:/1731220268257/Put/seqid=0 2024-11-10T06:31:31,992 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:31,992 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741883_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK], DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]) is bad. 2024-11-10T06:31:31,992 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741883_1067 2024-11-10T06:31:31,993 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK] 2024-11-10T06:31:31,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741884_1068 (size=23502) 2024-11-10T06:31:31,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741884_1068 (size=23502) 2024-11-10T06:31:32,004 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/5f856c400ef64679a4438a96a70095c2 as hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/5f856c400ef64679a4438a96a70095c2 2024-11-10T06:31:32,011 INFO [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 87051d5bfe1f8458f4aca6a150270b03/info of 87051d5bfe1f8458f4aca6a150270b03 into 5f856c400ef64679a4438a96a70095c2(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-10T06:31:32,011 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 87051d5bfe1f8458f4aca6a150270b03:
2024-11-10T06:31:32,011 INFO [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03., storeName=87051d5bfe1f8458f4aca6a150270b03/info, priority=13, startTime=1731220291971; duration=0sec
2024-11-10T06:31:32,011 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K
2024-11-10T06:31:32,011 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-10T06:31:32,011 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/5f856c400ef64679a4438a96a70095c2 because midkey is the same as first or last row
2024-11-10T06:31:32,011 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K
2024-11-10T06:31:32,011 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-10T06:31:32,011 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/5f856c400ef64679a4438a96a70095c2 because midkey is the same as first or last row
2024-11-10T06:31:32,011 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K
2024-11-10T06:31:32,011 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-10T06:31:32,011 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/5f856c400ef64679a4438a96a70095c2 because midkey is the same as first or last row
2024-11-10T06:31:32,011 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-10T06:31:32,011 DEBUG [RS:0;4999977c7e1b:45891-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 87051d5bfe1f8458f4aca6a150270b03:info
2024-11-10T06:31:32,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45891 {}] regionserver.HRegion(8855): Flush requested on 87051d5bfe1f8458f4aca6a150270b03
2024-11-10T06:31:32,160 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 87051d5bfe1f8458f4aca6a150270b03 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-10T06:31:32,166 DEBUG
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/8584d4aff0be45d89afbff6be32e9fac is 1080, key is row0018/info:/1731220291946/Put/seqid=0 2024-11-10T06:31:32,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741885_1069 (size=11421) 2024-11-10T06:31:32,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741885_1069 (size=11421) 2024-11-10T06:31:32,172 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/8584d4aff0be45d89afbff6be32e9fac 2024-11-10T06:31:32,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/.tmp/info/8584d4aff0be45d89afbff6be32e9fac as hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/8584d4aff0be45d89afbff6be32e9fac 2024-11-10T06:31:32,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/8584d4aff0be45d89afbff6be32e9fac, entries=6, sequenceid=66, filesize=11.2 K 2024-11-10T06:31:32,185 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 87051d5bfe1f8458f4aca6a150270b03 in 24ms, sequenceid=66, compaction requested=false 2024-11-10T06:31:32,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 87051d5bfe1f8458f4aca6a150270b03: 2024-11-10T06:31:32,185 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.1 K, sizeToCheck=16.0 K 2024-11-10T06:31:32,185 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:31:32,185 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/5f856c400ef64679a4438a96a70095c2 because midkey is the same as first or last row 2024-11-10T06:31:32,323 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:32,323 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-10T06:31:32,335 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.1731220274299 to hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/oldWALs/4999977c7e1b%2C45891%2C1731220254499.1731220274299 2024-11-10T06:31:32,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T06:31:32,361 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T06:31:32,361 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:31:32,361 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:31:32,362 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:31:32,362 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-10T06:31:32,362 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T06:31:32,362 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=677162162, stopped=false 2024-11-10T06:31:32,362 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=4999977c7e1b,39613,1731220254444 2024-11-10T06:31:32,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T06:31:32,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T06:31:32,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x10190e024420002, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T06:31:32,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:32,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:32,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x10190e024420002, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:32,364 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T06:31:32,364 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
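The split-policy entries logged above (ConstantSizeRegionSplitPolicy: "Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K", then StoreUtils refusing "because midkey is the same as first or last row") come down to two tests: is the summed store size over a threshold, and does a usable split point exist. The following is a minimal, self-contained sketch of that decision, not the HBase classes themselves; the threshold formula only approximates IncreasingToUpperBoundRegionSplitPolicy's documented behavior, and the 8 K / 16 K inputs are chosen purely so the threshold comes out at the 16.0 K seen in the log.

```java
// Simplified, standalone illustration of the size-based split check; NOT the
// HBase implementation. The formula is an approximation: threshold grows with
// the cube of same-table regions on the server, capped by the max file size.
public class SplitCheckSketch {
    static long sizeToCheck(long flushSizeBytes, long maxFileSizeBytes, int regionsWithCommonTable) {
        long initial = 2L * flushSizeBytes;
        long cubed = initial * regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
        return Math.min(maxFileSizeBytes, cubed);
    }

    /** Split only if the summed store size exceeds the threshold AND a usable
     *  split point exists (midkey must differ from the first and last row). */
    static boolean shouldSplit(long sumStoreSizeBytes, long threshold,
                               byte[] midKey, byte[] firstKey, byte[] lastKey) {
        if (sumStoreSizeBytes <= threshold || midKey == null) {
            return false;
        }
        return !java.util.Arrays.equals(midKey, firstKey)
            && !java.util.Arrays.equals(midKey, lastKey);
    }

    public static void main(String[] args) {
        // Illustrative numbers only: flush size 8 K, max file size 16 K, one region.
        long threshold = sizeToCheck(8 * 1024, 16 * 1024, 1);          // 16 K, like sizeToCheck in the log
        byte[] first = "row0001".getBytes(), last = "row0030".getBytes();
        // midkey equal to the first row -> no split, as in the "cannot split ... midkey" entries
        System.out.println(shouldSplit(23 * 1024, threshold, first, first, last)); // false
    }
}
```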
2024-11-10T06:31:32,364 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:31:32,365 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:31:32,365 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '4999977c7e1b,45891,1731220254499' ***** 2024-11-10T06:31:32,365 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T06:31:32,365 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4999977c7e1b,42305,1731220255452' ***** 2024-11-10T06:31:32,365 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T06:31:32,365 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42305-0x10190e024420002, quorum=127.0.0.1:62359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:31:32,365 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:31:32,365 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T06:31:32,365 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T06:31:32,365 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:31:32,365 INFO [RS:0;4999977c7e1b:45891 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T06:31:32,365 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T06:31:32,365 INFO [RS:1;4999977c7e1b:42305 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T06:31:32,365 INFO [RS:1;4999977c7e1b:42305 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T06:31:32,365 INFO [RS:0;4999977c7e1b:45891 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T06:31:32,365 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.HRegionServer(959): stopping server 4999977c7e1b,42305,1731220255452 2024-11-10T06:31:32,365 INFO [RS:1;4999977c7e1b:42305 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T06:31:32,365 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(3091): Received CLOSE for 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:31:32,365 INFO [RS:1;4999977c7e1b:42305 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;4999977c7e1b:42305. 
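The ZooKeeper entries above (NodeDeleted for /hbase/running, followed by "Set watcher on znode that does not yet exist, /hbase/running") are how the master and region servers notice the cluster-wide shutdown signal. Below is a standalone sketch using the plain ZooKeeper client API; the quorum string is a placeholder, and this illustrates the watch pattern only, not HBase's ZKWatcher/ZKUtil.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Watch a "running" marker znode; its deletion signals shutdown. Re-registering
// the watch via exists() is valid even when the znode is gone, which mirrors
// "Set watcher on znode that does not yet exist, /hbase/running".
public class RunningZNodeWatcherSketch implements Watcher {
    private final ZooKeeper zk;
    private final String path;

    RunningZNodeWatcherSketch(String quorum, String path) throws Exception {
        this.path = path;
        this.zk = new ZooKeeper(quorum, 30_000, this);
        zk.exists(path, true);                      // sets the watch whether or not the znode exists
    }

    @Override
    public void process(WatchedEvent event) {
        try {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted && path.equals(event.getPath())) {
                System.out.println("running marker deleted -> begin shutdown");
            }
            if (event.getPath() != null) {
                zk.exists(path, true);              // watches are one-shot; re-register
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) throws Exception {
        new RunningZNodeWatcherSketch("127.0.0.1:2181", "/hbase/running"); // placeholder quorum
        Thread.sleep(Long.MAX_VALUE);
    }
}
```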
2024-11-10T06:31:32,366 DEBUG [RS:1;4999977c7e1b:42305 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:31:32,366 DEBUG [RS:1;4999977c7e1b:42305 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:31:32,366 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.HRegionServer(976): stopping server 4999977c7e1b,42305,1731220255452; all regions closed. 2024-11-10T06:31:32,366 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(959): stopping server 4999977c7e1b,45891,1731220254499 2024-11-10T06:31:32,366 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T06:31:32,366 INFO [RS:0;4999977c7e1b:45891 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T06:31:32,366 INFO [RS:0;4999977c7e1b:45891 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;4999977c7e1b:45891. 
2024-11-10T06:31:32,366 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 87051d5bfe1f8458f4aca6a150270b03, disabling compactions & flushes 2024-11-10T06:31:32,366 DEBUG [RS:0;4999977c7e1b:45891 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:31:32,366 DEBUG [RS:0;4999977c7e1b:45891 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:31:32,366 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. 2024-11-10T06:31:32,366 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. 2024-11-10T06:31:32,366 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:32,366 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. after waiting 0 ms 2024-11-10T06:31:32,366 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. 2024-11-10T06:31:32,366 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:32,367 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:32,367 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:32,367 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T06:31:32,367 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:32,367 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
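The "Time limited wait for close lock ... Acquired close lock ... after waiting 0 ms ... Updates disabled" sequence above is the region close fence: new writes are rejected, then close waits a bounded time for in-flight operations to drain. A small self-contained sketch of that pattern follows, assuming a plain ReentrantReadWriteLock rather than HRegion's actual locking fields.

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Writers (puts/flushes) take the read lock; close takes the write lock with a
// timeout so a stuck writer cannot block shutdown forever.
public class CloseLockSketch {
    private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
    private volatile boolean writesDisabled = false;

    public void update(Runnable mutation) {
        if (writesDisabled) {
            throw new IllegalStateException("region is closing");   // "Updates disabled for region ..."
        }
        closeLock.readLock().lock();
        try {
            mutation.run();
        } finally {
            closeLock.readLock().unlock();
        }
    }

    /** Returns true if the close fence was acquired within the time limit. */
    public boolean close(long timeoutMillis) throws InterruptedException {
        writesDisabled = true;
        long start = System.currentTimeMillis();
        if (!closeLock.writeLock().tryLock(timeoutMillis, TimeUnit.MILLISECONDS)) {
            return false;                                            // could not fence in time
        }
        try {
            System.out.println("Acquired close lock after waiting "
                + (System.currentTimeMillis() - start) + " ms");
            // flush memstores, close stores, write the close marker to the WAL ...
            return true;
        } finally {
            closeLock.writeLock().unlock();
        }
    }
}
```

A close handler would call something like close(60_000) while update(...) callers hold the read lock only for the duration of a single mutation, which is why the logged wait is usually 0 ms.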
2024-11-10T06:31:32,367 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T06:31:32,367 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T06:31:32,367 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-10T06:31:32,367 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:32,367 DEBUG [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(1325): Online Regions={87051d5bfe1f8458f4aca6a150270b03=TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03., 1588230740=hbase:meta,,1.1588230740} 2024-11-10T06:31:32,367 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/febb924d3ccf47988a3a97f420f847da, hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/558e37bb2c7f4126beb1bc1fdefa744f, hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/f938228b21b146119d1a1107a7ee3749, hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/7f4756a39131478c810819578a59c3d1, hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/0233096ddff14d3ea06b690732e3d6b7, hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/b38566f4f6034c16a70714384a8fa2d7] to archive 2024-11-10T06:31:32,367 DEBUG [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 87051d5bfe1f8458f4aca6a150270b03 2024-11-10T06:31:32,367 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:32,367 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T06:31:32,368 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 2024-11-10T06:31:32,368 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T06:31:32,368 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T06:31:32,368 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T06:31:32,368 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T06:31:32,368 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-10T06:31:32,368 WARN [IPC Server handler 3 on default port 40625 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 has not been closed. Lease recovery is in progress. RecoveryId = 1070 for block blk_1073741837_1013 2024-11-10T06:31:32,368 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 after 0ms 2024-11-10T06:31:32,368 ERROR [FSHLog-0-hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf-prefix:4999977c7e1b,45891,1731220254499.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:32,368 WARN [FSHLog-0-hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf-prefix:4999977c7e1b,45891,1731220254499.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:32,369 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-10T06:31:32,369 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 4999977c7e1b%2C45891%2C1731220254499.meta:.meta(num 1731220255324) roll requested 2024-11-10T06:31:32,369 INFO [regionserver/4999977c7e1b:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C45891%2C1731220254499.meta.1731220292369.meta 2024-11-10T06:31:32,371 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/febb924d3ccf47988a3a97f420f847da to hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/febb924d3ccf47988a3a97f420f847da 2024-11-10T06:31:32,372 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/558e37bb2c7f4126beb1bc1fdefa744f to hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/558e37bb2c7f4126beb1bc1fdefa744f 2024-11-10T06:31:32,374 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/f938228b21b146119d1a1107a7ee3749 to hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/f938228b21b146119d1a1107a7ee3749 2024-11-10T06:31:32,375 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/7f4756a39131478c810819578a59c3d1 to hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/7f4756a39131478c810819578a59c3d1 2024-11-10T06:31:32,377 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/0233096ddff14d3ea06b690732e3d6b7 to hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/0233096ddff14d3ea06b690732e3d6b7 2024-11-10T06:31:32,378 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:32,378 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:32,378 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:32,378 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:32,378 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:32,378 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/b38566f4f6034c16a70714384a8fa2d7 to hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/info/b38566f4f6034c16a70714384a8fa2d7 2024-11-10T06:31:32,378 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220292369.meta 2024-11-10T06:31:32,379 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:32,379 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=4999977c7e1b:39613 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-10T06:31:32,379 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [febb924d3ccf47988a3a97f420f847da=10347, 558e37bb2c7f4126beb1bc1fdefa744f=12506, f938228b21b146119d1a1107a7ee3749=17994, 7f4756a39131478c810819578a59c3d1=6027, 0233096ddff14d3ea06b690732e3d6b7=6027, b38566f4f6034c16a70714384a8fa2d7=11421] 2024-11-10T06:31:32,379 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38361,DS-724eaefd-57fa-446e-bf10-0c6346e1115f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:32,379 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta 2024-11-10T06:31:32,379 WARN [IPC Server handler 0 on default port 40625 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1072 for block blk_1073741834_1010 2024-11-10T06:31:32,380 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta after 1ms 2024-11-10T06:31:32,384 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39161:39161),(127.0.0.1/127.0.0.1:41513:41513)] 2024-11-10T06:31:32,384 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta is not closed yet, will try archiving it next time 2024-11-10T06:31:32,388 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/default/TestLogRolling-testLogRollOnDatanodeDeath/87051d5bfe1f8458f4aca6a150270b03/recovered.edits/69.seqid, newMaxSeqId=69, maxSeqId=1 2024-11-10T06:31:32,389 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. 2024-11-10T06:31:32,389 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 87051d5bfe1f8458f4aca6a150270b03: Waiting for close lock at 1731220292366Running coprocessor pre-close hooks at 1731220292366Disabling compacts and flushes for region at 1731220292366Disabling writes for close at 1731220292366Writing region close event to WAL at 1731220292384 (+18 ms)Running coprocessor post-close hooks at 1731220292389 (+5 ms)Closed at 1731220292389 2024-11-10T06:31:32,389 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03. 2024-11-10T06:31:32,400 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/.tmp/info/9c55cf710baa44d2880c5cacc1ccee97 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731220255547.87051d5bfe1f8458f4aca6a150270b03./info:regioninfo/1731220255910/Put/seqid=0 2024-11-10T06:31:32,402 WARN [Thread-1050 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:32,402 WARN [Thread-1050 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741887_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK], DatanodeInfoWithStorage[127.0.0.1:35019,DS-54e98695-2016-467c-93dc-5fe5d14e27b8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]) is bad. 2024-11-10T06:31:32,402 WARN [Thread-1050 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741887_1073 2024-11-10T06:31:32,403 WARN [Thread-1050 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK] 2024-11-10T06:31:32,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741888_1074 (size=7089) 2024-11-10T06:31:32,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741888_1074 (size=7089) 2024-11-10T06:31:32,409 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/.tmp/info/9c55cf710baa44d2880c5cacc1ccee97 2024-11-10T06:31:32,430 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/.tmp/ns/976721abbe28475582a69a9c3878da2b is 43, key is default/ns:d/1731220255370/Put/seqid=0 2024-11-10T06:31:32,432 WARN [Thread-1057 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:32,432 WARN [Thread-1057 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741889_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK], DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]) is bad. 
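The DataStreamer entries above ("Error Recovery for ... datanode 0 ... is bad", "Abandoning BP-...", "Excluding datanode ...") show the client's reaction to a dead pipeline node: abandon the block attempt, remember the failing datanode, and request a fresh pipeline that excludes it. Below is a simplified, self-contained sketch of that retry-with-exclusion loop; the allocatePipeline and NodeWriter callbacks are hypothetical stand-ins, not the HDFS DataStreamer API.

```java
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Function;

public class PipelineRetrySketch {
    /** Stand-in for streaming a block to one node; throws on failure. */
    public interface NodeWriter { void write(String node) throws Exception; }

    public static boolean writeWithExclusion(Function<Set<String>, List<String>> allocatePipeline,
                                             NodeWriter tryWrite, int maxAttempts) {
        Set<String> excluded = new HashSet<>();
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            List<String> pipeline = allocatePipeline.apply(excluded);  // ask for targets, minus bad ones
            String current = null;
            try {
                for (String node : pipeline) {
                    current = node;
                    tryWrite.write(node);                              // connect and stream the block
                }
                return true;                                           // whole pipeline accepted the block
            } catch (Exception e) {
                // Mirrors "Abandoning blk_..." / "Excluding datanode ...": drop this attempt,
                // remember the failing node, and ask for a fresh pipeline next iteration.
                if (current != null) {
                    excluded.add(current);
                }
                System.out.println("attempt " + attempt + " failed at " + current + ": " + e.getMessage());
            }
        }
        return false;                                                  // retry budget exhausted
    }
}
```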
2024-11-10T06:31:32,432 WARN [Thread-1057 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741889_1075 2024-11-10T06:31:32,433 WARN [Thread-1057 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK] 2024-11-10T06:31:32,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741890_1076 (size=5153) 2024-11-10T06:31:32,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741890_1076 (size=5153) 2024-11-10T06:31:32,438 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/.tmp/ns/976721abbe28475582a69a9c3878da2b 2024-11-10T06:31:32,459 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/.tmp/table/a3e2261865c74f379a5fbff7dd6cd7bb is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731220255921/Put/seqid=0 2024-11-10T06:31:32,461 WARN [Thread-1063 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1077 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39575 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:32,461 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:51962 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741891_1077] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data8]'}, localName='127.0.0.1:39305', datanodeUuid='b468639d-7b11-45e5-9a2a-01e054eead7f', xmitsInProgress=0}:Exception transferring block BP-741755570-172.17.0.2-1731220253746:blk_1073741891_1077 to mirror 127.0.0.1:39575 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:32,462 WARN [Thread-1063 {}] hdfs.DataStreamer(1731): Error Recovery for BP-741755570-172.17.0.2-1731220253746:blk_1073741891_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39305,DS-1e821df2-9aa8-4433-9dbd-eeb3f2691f9d,DISK], DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK]) is bad. 2024-11-10T06:31:32,462 WARN [Thread-1063 {}] hdfs.DataStreamer(1850): Abandoning BP-741755570-172.17.0.2-1731220253746:blk_1073741891_1077 2024-11-10T06:31:32,462 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:51962 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741891_1077] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T06:31:32,462 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1557404151_22 at /127.0.0.1:51962 [Receiving block BP-741755570-172.17.0.2-1731220253746:blk_1073741891_1077] {}] datanode.DataXceiver(331): 127.0.0.1:39305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51962 dst: /127.0.0.1:39305 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
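The RecoverLeaseFSUtils entries above ("Recover lease on dfs file ...", "Failed to recover lease, attempt=0 ... after 0ms", with attempt=1 still failing roughly four seconds later in this log) follow the usual pattern of polling DistributedFileSystem#recoverLease until the NameNode reports the file closed. The sketch below shows that loop; the backoff values and timeout are placeholders, and it is not HBase's RecoverLeaseFSUtils itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Poll recoverLease until the NameNode reports the file closed, backing off
// between attempts, similar in spirit to the retries visible in the log.
public class LeaseRecoverySketch {
    public static boolean recover(Path walFile, Configuration conf, long timeoutMs) throws Exception {
        FileSystem fs = walFile.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
            return true;                               // non-HDFS filesystems have no lease to recover
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        long start = System.currentTimeMillis();
        for (int attempt = 0; System.currentTimeMillis() - start < timeoutMs; attempt++) {
            boolean recovered = dfs.recoverLease(walFile);   // true once the file is closed
            long elapsed = System.currentTimeMillis() - start;
            System.out.println((recovered ? "Recovered" : "Failed to recover")
                + " lease, attempt=" + attempt + " on file=" + walFile + " after " + elapsed + "ms");
            if (recovered) {
                return true;
            }
            Thread.sleep(Math.min(4000, 1000L * (attempt + 1)));       // placeholder backoff
        }
        return false;
    }
}
```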
2024-11-10T06:31:32,462 WARN [Thread-1063 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39575,DS-6983d177-432e-4217-85a8-e2c7a50145fb,DISK] 2024-11-10T06:31:32,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741892_1078 (size=5424) 2024-11-10T06:31:32,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741892_1078 (size=5424) 2024-11-10T06:31:32,467 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/.tmp/table/a3e2261865c74f379a5fbff7dd6cd7bb 2024-11-10T06:31:32,473 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/.tmp/info/9c55cf710baa44d2880c5cacc1ccee97 as hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/info/9c55cf710baa44d2880c5cacc1ccee97 2024-11-10T06:31:32,479 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/info/9c55cf710baa44d2880c5cacc1ccee97, entries=10, sequenceid=11, filesize=6.9 K 2024-11-10T06:31:32,480 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/.tmp/ns/976721abbe28475582a69a9c3878da2b as hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/ns/976721abbe28475582a69a9c3878da2b 2024-11-10T06:31:32,485 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/ns/976721abbe28475582a69a9c3878da2b, entries=2, sequenceid=11, filesize=5.0 K 2024-11-10T06:31:32,486 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/.tmp/table/a3e2261865c74f379a5fbff7dd6cd7bb as hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/table/a3e2261865c74f379a5fbff7dd6cd7bb 2024-11-10T06:31:32,491 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/table/a3e2261865c74f379a5fbff7dd6cd7bb, entries=2, sequenceid=11, filesize=5.3 K 2024-11-10T06:31:32,492 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 124ms, sequenceid=11, 
compaction requested=false 2024-11-10T06:31:32,497 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-10T06:31:32,498 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T06:31:32,498 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T06:31:32,498 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731220292367Running coprocessor pre-close hooks at 1731220292367Disabling compacts and flushes for region at 1731220292367Disabling writes for close at 1731220292368 (+1 ms)Obtaining lock to block concurrent updates at 1731220292368Preparing flush snapshotting stores in 1588230740 at 1731220292368Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731220292368Flushing stores of hbase:meta,,1.1588230740 at 1731220292385 (+17 ms)Flushing 1588230740/info: creating writer at 1731220292385Flushing 1588230740/info: appending metadata at 1731220292400 (+15 ms)Flushing 1588230740/info: closing flushed file at 1731220292400Flushing 1588230740/ns: creating writer at 1731220292415 (+15 ms)Flushing 1588230740/ns: appending metadata at 1731220292429 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731220292429Flushing 1588230740/table: creating writer at 1731220292444 (+15 ms)Flushing 1588230740/table: appending metadata at 1731220292459 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731220292459Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3abad57: reopening flushed file at 1731220292472 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@508cdee6: reopening flushed file at 1731220292479 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f789be0: reopening flushed file at 1731220292485 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 124ms, sequenceid=11, compaction requested=false at 1731220292492 (+7 ms)Writing region close event to WAL at 1731220292494 (+2 ms)Running coprocessor post-close hooks at 1731220292498 (+4 ms)Closed at 1731220292498 2024-11-10T06:31:32,498 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T06:31:32,512 INFO [regionserver/4999977c7e1b:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-10T06:31:32,512 INFO [regionserver/4999977c7e1b:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-10T06:31:32,568 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(976): stopping server 4999977c7e1b,45891,1731220254499; all regions closed. 
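The HRegionFileSystem "Committing .../.tmp/... as .../info/..." entries above are the flush commit step: the new file is written under the region's .tmp directory and only published into the store directory by a rename, so readers never observe a half-written file. The sketch below shows that write-then-rename pattern with the Hadoop FileSystem API; the directory layout and file names are illustrative, not the exact HBase layout rules, and the byte payload stands in for a real HFile writer.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Write into .tmp first, then publish with a rename; on HDFS the rename is an
// atomic NameNode operation, so readers see either the old state or the
// complete new file.
public class TmpCommitSketch {
    public static Path writeAndCommit(FileSystem fs, Path regionDir, String family,
                                      String fileName, byte[] payload) throws Exception {
        Path tmpFile = new Path(regionDir, ".tmp/" + family + "/" + fileName);
        Path finalFile = new Path(regionDir, family + "/" + fileName);
        fs.mkdirs(tmpFile.getParent());
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.write(payload);                        // stand-in for the real HFile writer
        }
        fs.mkdirs(finalFile.getParent());
        if (!fs.rename(tmpFile, finalFile)) {          // the "Committing ... as ..." step
            throw new java.io.IOException("rename failed for " + tmpFile);
        }
        return finalFile;
    }

    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path region = new Path("/tmp/region-sketch");  // placeholder region directory
        System.out.println(writeAndCommit(fs, region, "info", "example-hfile", "data".getBytes()));
    }
}
```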
2024-11-10T06:31:32,568 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T06:31:32,568 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T06:31:32,569 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T06:31:32,569 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T06:31:32,569 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T06:31:32,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741886_1071 (size=825)
2024-11-10T06:31:32,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741886_1071 (size=825)
2024-11-10T06:31:32,761 INFO [regionserver/4999977c7e1b:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-11-10T06:31:32,762 INFO [regionserver/4999977c7e1b:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-11-10T06:31:32,762 INFO [regionserver/4999977c7e1b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-10T06:31:33,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741862_1045 (size=13591)
2024-11-10T06:31:33,514 INFO [regionserver/4999977c7e1b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-10T06:31:33,585 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3265dca3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35019, datanodeUuid=e19f1e86-46e6-4f0d-b75d-d7b0b9a2c8c8, infoPort=39161, infoSecurePort=0, ipcPort=34217, storageInfo=lv=-57;cid=testClusterID;nsid=1053045312;c=1731220253746):Failed to transfer BP-741755570-172.17.0.2-1731220253746:blk_1073741827_1003 to 127.0.0.1:39575 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T06:31:33,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741829_1005 (size=34) 2024-11-10T06:31:34,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741825_1001 (size=7) 2024-11-10T06:31:35,498 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-10T06:31:35,499 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T06:31:35,500 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-10T06:31:36,135 INFO [master/4999977c7e1b:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-10T06:31:36,135 INFO [master/4999977c7e1b:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-10T06:31:36,369 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 after 4001ms 2024-11-10T06:31:36,381 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta after 4001ms 2024-11-10T06:31:36,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741835_1011 (size=393) 2024-11-10T06:31:36,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741831_1007 (size=1321) 2024-11-10T06:31:36,606 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4a1a384a {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-741755570-172.17.0.2-1731220253746:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:38361,null,null]) java.net.ConnectException: Call From 4999977c7e1b/172.17.0.2 to localhost:39113 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-10T06:31:37,368 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-10T06:31:37,370 DEBUG [RS:1;4999977c7e1b:42305 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/oldWALs 2024-11-10T06:31:37,370 INFO [RS:1;4999977c7e1b:42305 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4999977c7e1b%2C42305%2C1731220255452:(num 1731220255648) 2024-11-10T06:31:37,370 DEBUG [RS:1;4999977c7e1b:42305 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:31:37,370 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T06:31:37,370 INFO [RS:1;4999977c7e1b:42305 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T06:31:37,370 INFO [RS:1;4999977c7e1b:42305 {}] hbase.ChoreService(370): Chore service for: regionserver/4999977c7e1b:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-10T06:31:37,370 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T06:31:37,370 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T06:31:37,370 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T06:31:37,370 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T06:31:37,371 INFO [RS:1;4999977c7e1b:42305 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T06:31:37,371 INFO [RS:1;4999977c7e1b:42305 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42305 2024-11-10T06:31:37,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T06:31:37,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x10190e024420002, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4999977c7e1b,42305,1731220255452 2024-11-10T06:31:37,373 INFO [RS:1;4999977c7e1b:42305 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T06:31:37,375 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4999977c7e1b,42305,1731220255452] 2024-11-10T06:31:37,376 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4999977c7e1b,42305,1731220255452 already deleted, retry=false 2024-11-10T06:31:37,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:37,376 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4999977c7e1b,42305,1731220255452 expired; onlineServers=1 2024-11-10T06:31:37,390 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,419 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,421 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x10190e024420002, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:31:37,475 INFO [RS:1;4999977c7e1b:42305 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T06:31:37,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x10190e024420002, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:31:37,475 INFO [RS:1;4999977c7e1b:42305 {}] regionserver.HRegionServer(1031): Exiting; stopping=4999977c7e1b,42305,1731220255452; zookeeper connection closed. 2024-11-10T06:31:37,475 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@90ae5bf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@90ae5bf 2024-11-10T06:31:37,569 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-10T06:31:37,573 DEBUG [RS:0;4999977c7e1b:45891 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/oldWALs 2024-11-10T06:31:37,573 INFO [RS:0;4999977c7e1b:45891 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4999977c7e1b%2C45891%2C1731220254499.meta:.meta(num 1731220292369) 2024-11-10T06:31:37,573 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:37,574 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:37,574 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:37,574 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:37,574 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:37,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741880_1064 (size=16308) 2024-11-10T06:31:37,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741880_1064 (size=16308) 2024-11-10T06:31:37,578 DEBUG [RS:0;4999977c7e1b:45891 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/oldWALs 2024-11-10T06:31:37,578 INFO [RS:0;4999977c7e1b:45891 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4999977c7e1b%2C45891%2C1731220254499:(num 1731220291923) 2024-11-10T06:31:37,578 DEBUG [RS:0;4999977c7e1b:45891 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:31:37,578 INFO [RS:0;4999977c7e1b:45891 {}] 
regionserver.LeaseManager(133): Closed leases 2024-11-10T06:31:37,578 INFO [RS:0;4999977c7e1b:45891 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T06:31:37,579 INFO [RS:0;4999977c7e1b:45891 {}] hbase.ChoreService(370): Chore service for: regionserver/4999977c7e1b:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T06:31:37,579 INFO [RS:0;4999977c7e1b:45891 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T06:31:37,579 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T06:31:37,579 INFO [RS:0;4999977c7e1b:45891 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45891 2024-11-10T06:31:37,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T06:31:37,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4999977c7e1b,45891,1731220254499 2024-11-10T06:31:37,581 INFO [RS:0;4999977c7e1b:45891 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T06:31:37,582 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4999977c7e1b,45891,1731220254499] 2024-11-10T06:31:37,584 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4999977c7e1b,45891,1731220254499 already deleted, retry=false 2024-11-10T06:31:37,584 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4999977c7e1b,45891,1731220254499 expired; onlineServers=0 2024-11-10T06:31:37,584 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '4999977c7e1b,39613,1731220254444' ***** 2024-11-10T06:31:37,584 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T06:31:37,584 INFO [M:0;4999977c7e1b:39613 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T06:31:37,584 INFO [M:0;4999977c7e1b:39613 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T06:31:37,584 DEBUG [M:0;4999977c7e1b:39613 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T06:31:37,584 DEBUG [M:0;4999977c7e1b:39613 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T06:31:37,584 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
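The wal.AbstractFSWAL(2118) errors above name "hbase.wal.fshlog.wait.on.shutdown.seconds" as the setting that bounds how long a closing WAL waits for its async writer. A minimal sketch of raising it, assuming the value is applied before the region servers create their WALs (the chosen value of 30 is illustrative, not a recommendation derived from this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The log above reports waiting 5 seconds; allow more time for the async
    // writer to close when the underlying HDFS is slow or degraded.
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
    System.out.println("wait on shutdown = "
        + conf.get("hbase.wal.fshlog.wait.on.shutdown.seconds") + "s");
  }
}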
2024-11-10T06:31:37,584 DEBUG [master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220254684 {}] cleaner.HFileCleaner(306): Exit Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220254684,5,FailOnTimeoutGroup] 2024-11-10T06:31:37,585 INFO [M:0;4999977c7e1b:39613 {}] hbase.ChoreService(370): Chore service for: master/4999977c7e1b:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T06:31:37,585 DEBUG [master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220254684 {}] cleaner.HFileCleaner(306): Exit Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220254684,5,FailOnTimeoutGroup] 2024-11-10T06:31:37,585 INFO [M:0;4999977c7e1b:39613 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T06:31:37,585 DEBUG [M:0;4999977c7e1b:39613 {}] master.HMaster(1795): Stopping service threads 2024-11-10T06:31:37,585 INFO [M:0;4999977c7e1b:39613 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T06:31:37,585 INFO [M:0;4999977c7e1b:39613 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T06:31:37,585 INFO [M:0;4999977c7e1b:39613 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T06:31:37,585 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T06:31:37,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T06:31:37,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:37,586 DEBUG [M:0;4999977c7e1b:39613 {}] zookeeper.ZKUtil(347): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T06:31:37,586 WARN [M:0;4999977c7e1b:39613 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T06:31:37,587 INFO [M:0;4999977c7e1b:39613 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/.lastflushedseqids 2024-11-10T06:31:37,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741893_1079 (size=130) 2024-11-10T06:31:37,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741893_1079 (size=130) 2024-11-10T06:31:37,593 INFO [M:0;4999977c7e1b:39613 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T06:31:37,593 INFO [M:0;4999977c7e1b:39613 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T06:31:37,593 DEBUG [M:0;4999977c7e1b:39613 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T06:31:37,593 INFO [M:0;4999977c7e1b:39613 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:31:37,593 DEBUG [M:0;4999977c7e1b:39613 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:31:37,593 DEBUG [M:0;4999977c7e1b:39613 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T06:31:37,593 DEBUG [M:0;4999977c7e1b:39613 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:31:37,594 INFO [M:0;4999977c7e1b:39613 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-10T06:31:37,611 DEBUG [M:0;4999977c7e1b:39613 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d968bb6fac074e21856e7150faf40220 is 82, key is hbase:meta,,1/info:regioninfo/1731220255351/Put/seqid=0 2024-11-10T06:31:37,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741894_1080 (size=5672) 2024-11-10T06:31:37,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741894_1080 (size=5672) 2024-11-10T06:31:37,617 INFO [M:0;4999977c7e1b:39613 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d968bb6fac074e21856e7150faf40220 2024-11-10T06:31:37,643 DEBUG [M:0;4999977c7e1b:39613 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/671b6bb73b914db99fd6ae2d7cb7b60a is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731220255925/Put/seqid=0 2024-11-10T06:31:37,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741895_1081 (size=6255) 2024-11-10T06:31:37,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741895_1081 (size=6255) 2024-11-10T06:31:37,649 INFO [M:0;4999977c7e1b:39613 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/671b6bb73b914db99fd6ae2d7cb7b60a 2024-11-10T06:31:37,655 INFO [M:0;4999977c7e1b:39613 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 671b6bb73b914db99fd6ae2d7cb7b60a 2024-11-10T06:31:37,670 DEBUG [M:0;4999977c7e1b:39613 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/29adc1192f874e3284359ade50152601 is 69, key is 4999977c7e1b,42305,1731220255452/rs:state/1731220255498/Put/seqid=0 2024-11-10T06:31:37,676 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741896_1082 (size=5224) 2024-11-10T06:31:37,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741896_1082 (size=5224) 2024-11-10T06:31:37,677 INFO [M:0;4999977c7e1b:39613 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/29adc1192f874e3284359ade50152601 2024-11-10T06:31:37,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:31:37,683 INFO [RS:0;4999977c7e1b:45891 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T06:31:37,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45891-0x10190e024420001, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:31:37,683 INFO [RS:0;4999977c7e1b:45891 {}] regionserver.HRegionServer(1031): Exiting; stopping=4999977c7e1b,45891,1731220254499; zookeeper connection closed. 2024-11-10T06:31:37,683 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@f9fb3ff {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@f9fb3ff 2024-11-10T06:31:37,683 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-10T06:31:37,698 DEBUG [M:0;4999977c7e1b:39613 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5a19043e7e5243a587a4267fd0e79bd6 is 52, key is load_balancer_on/state:d/1731220255434/Put/seqid=0 2024-11-10T06:31:37,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741897_1083 (size=5056) 2024-11-10T06:31:37,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741897_1083 (size=5056) 2024-11-10T06:31:37,704 INFO [M:0;4999977c7e1b:39613 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5a19043e7e5243a587a4267fd0e79bd6 2024-11-10T06:31:37,710 DEBUG [M:0;4999977c7e1b:39613 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d968bb6fac074e21856e7150faf40220 as hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d968bb6fac074e21856e7150faf40220 2024-11-10T06:31:37,716 INFO [M:0;4999977c7e1b:39613 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d968bb6fac074e21856e7150faf40220, entries=8, sequenceid=60, filesize=5.5 K 2024-11-10T06:31:37,717 DEBUG [M:0;4999977c7e1b:39613 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/671b6bb73b914db99fd6ae2d7cb7b60a as hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/671b6bb73b914db99fd6ae2d7cb7b60a 2024-11-10T06:31:37,722 INFO [M:0;4999977c7e1b:39613 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 671b6bb73b914db99fd6ae2d7cb7b60a 2024-11-10T06:31:37,722 INFO [M:0;4999977c7e1b:39613 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/671b6bb73b914db99fd6ae2d7cb7b60a, entries=6, sequenceid=60, filesize=6.1 K 2024-11-10T06:31:37,723 DEBUG [M:0;4999977c7e1b:39613 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/29adc1192f874e3284359ade50152601 as hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/29adc1192f874e3284359ade50152601 2024-11-10T06:31:37,728 INFO [M:0;4999977c7e1b:39613 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/29adc1192f874e3284359ade50152601, entries=2, sequenceid=60, filesize=5.1 K 2024-11-10T06:31:37,729 DEBUG [M:0;4999977c7e1b:39613 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5a19043e7e5243a587a4267fd0e79bd6 as hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5a19043e7e5243a587a4267fd0e79bd6 2024-11-10T06:31:37,734 INFO [M:0;4999977c7e1b:39613 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5a19043e7e5243a587a4267fd0e79bd6, entries=1, sequenceid=60, filesize=4.9 K 2024-11-10T06:31:37,735 INFO [M:0;4999977c7e1b:39613 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=60, compaction requested=false 2024-11-10T06:31:37,736 INFO [M:0;4999977c7e1b:39613 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T06:31:37,736 DEBUG [M:0;4999977c7e1b:39613 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731220297593Disabling compacts and flushes for region at 1731220297593Disabling writes for close at 1731220297593Obtaining lock to block concurrent updates at 1731220297594 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731220297594Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731220297594Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731220297595 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731220297595Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731220297611 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731220297611Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731220297623 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731220297642 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731220297642Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731220297655 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731220297670 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731220297670Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731220297682 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731220297698 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731220297698Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f5f47fe: reopening flushed file at 1731220297710 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2eeee2ac: reopening flushed file at 1731220297716 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ebab26f: reopening flushed file at 1731220297722 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b81e7ff: reopening flushed file at 1731220297728 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=60, compaction requested=false at 1731220297735 (+7 ms)Writing region close event to WAL at 1731220297736 (+1 ms)Closed at 1731220297736 2024-11-10T06:31:37,737 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:37,737 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:37,737 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:37,737 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:37,737 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:37,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35019 is added to blk_1073741878_1061 (size=1045) 2024-11-10T06:31:37,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39305 is added to blk_1073741878_1061 (size=1045) 2024-11-10T06:31:37,740 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
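This run exercises WAL rolling (TestLogRolling#testLogRollOnDatanodeDeath), and the entries above show each LogRoller thread exiting once its WAL is closed. For reference, a roll can also be requested explicitly through the Admin API; a minimal sketch, reusing the host,port,startcode server naming seen in this log purely as an illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WalRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Server name format is host,port,startcode, as used throughout the log;
      // this particular name is taken from the log only as an example.
      ServerName rs = ServerName.valueOf("4999977c7e1b,45891,1731220254499");
      // Ask that region server to roll its WAL: the LogRoller closes the
      // current writer and opens a new file, like the rolls recorded above.
      admin.rollWALWriter(rs);
    }
  }
}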
2024-11-10T06:31:37,740 INFO [M:0;4999977c7e1b:39613 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-10T06:31:37,740 INFO [M:0;4999977c7e1b:39613 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39613 2024-11-10T06:31:37,740 INFO [M:0;4999977c7e1b:39613 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T06:31:37,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:31:37,842 INFO [M:0;4999977c7e1b:39613 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T06:31:37,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39613-0x10190e024420000, quorum=127.0.0.1:62359, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:31:37,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@601efdaa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:37,845 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c8b9f54{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:31:37,845 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:31:37,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25b5f6a5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:31:37,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@272904a9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir/,STOPPED} 2024-11-10T06:31:37,847 WARN [BP-741755570-172.17.0.2-1731220253746 heartbeating to localhost/127.0.0.1:40625 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:31:37,847 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-10T06:31:37,847 WARN [BP-741755570-172.17.0.2-1731220253746 heartbeating to localhost/127.0.0.1:40625 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-741755570-172.17.0.2-1731220253746 (Datanode Uuid e19f1e86-46e6-4f0d-b75d-d7b0b9a2c8c8) service to localhost/127.0.0.1:40625 2024-11-10T06:31:37,847 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:31:37,846 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3f25a7b0 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-741755570-172.17.0.2-1731220253746:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:38361,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:39113 , LocalHost:localPort 4999977c7e1b/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-10T06:31:37,847 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3f25a7b0 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-741755570-172.17.0.2-1731220253746:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:35019,null,null], DatanodeInfoWithStorage[127.0.0.1:38361,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-741755570-172.17.0.2-1731220253746 2024-11-10T06:31:37,847 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3f25a7b0 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-741755570-172.17.0.2-1731220253746:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:35019,null,null]) java.io.IOException: No block pool offer service for bpid=BP-741755570-172.17.0.2-1731220253746 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:37,848 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data3/current/BP-741755570-172.17.0.2-1731220253746 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:37,848 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3f25a7b0 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-741755570-172.17.0.2-1731220253746:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:38361,null,null]) java.io.IOException: No block pool offer service for bpid=BP-741755570-172.17.0.2-1731220253746 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:37,848 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3f25a7b0 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-741755570-172.17.0.2-1731220253746:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:35019,null,null], DatanodeInfoWithStorage[127.0.0.1:38361,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-741755570-172.17.0.2-1731220253746:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:35019,null,null], DatanodeInfoWithStorage[127.0.0.1:38361,null,null]] 2024-11-10T06:31:37,848 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data4/current/BP-741755570-172.17.0.2-1731220253746 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:37,848 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:31:37,850 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@13a31f2a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:37,851 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37073a3f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:31:37,851 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:31:37,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6ee7700a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:31:37,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a3ddcf2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir/,STOPPED} 2024-11-10T06:31:37,852 WARN [BP-741755570-172.17.0.2-1731220253746 heartbeating to localhost/127.0.0.1:40625 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:31:37,852 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T06:31:37,852 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:31:37,852 WARN [BP-741755570-172.17.0.2-1731220253746 heartbeating to localhost/127.0.0.1:40625 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-741755570-172.17.0.2-1731220253746 (Datanode Uuid b468639d-7b11-45e5-9a2a-01e054eead7f) service to localhost/127.0.0.1:40625 2024-11-10T06:31:37,853 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data7/current/BP-741755570-172.17.0.2-1731220253746 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:37,853 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/cluster_5fdca2fe-ef15-fd1f-92b0-2c2290a10993/data/data8/current/BP-741755570-172.17.0.2-1731220253746 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:37,854 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:31:37,859 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@618abfb4{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T06:31:37,860 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1585275{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:31:37,860 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:31:37,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7447e09f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:31:37,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3789f604{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir/,STOPPED} 2024-11-10T06:31:37,869 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T06:31:37,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T06:31:37,908 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=153 (was 79) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:40625 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:44957 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:40625 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f328cbefa48.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f328cbefa48.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:40625 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40625 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40625 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:40625 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44957 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40625 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40625 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection 
to localhost/127.0.0.1:40625 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40625 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:40625 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
- Thread LEAK? -, OpenFileDescriptor=450 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=3 (was 7), ProcessCount=11 (was 11), AvailableMemoryMB=7192 (was 7628) 2024-11-10T06:31:37,916 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=153, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=3, ProcessCount=11, AvailableMemoryMB=7191 2024-11-10T06:31:37,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T06:31:37,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.log.dir so I do NOT create it in target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f 2024-11-10T06:31:37,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9785572f-cfab-81d7-23d2-631797d24c96/hadoop.tmp.dir so I do NOT create it in target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f 2024-11-10T06:31:37,916 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13, deleteOnExit=true 2024-11-10T06:31:37,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-10T06:31:37,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/test.cache.data in system properties and HBase conf 2024-11-10T06:31:37,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T06:31:37,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir in system properties and HBase conf 2024-11-10T06:31:37,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T06:31:37,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T06:31:37,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T06:31:37,918 DEBUG 
[Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-10T06:31:37,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T06:31:37,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T06:31:37,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T06:31:37,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T06:31:37,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T06:31:37,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T06:31:37,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T06:31:37,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T06:31:37,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T06:31:37,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/nfs.dump.dir in system properties and HBase conf 2024-11-10T06:31:37,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/java.io.tmpdir in system properties and HBase conf 2024-11-10T06:31:37,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T06:31:37,920 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T06:31:37,920 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T06:31:37,923 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T06:31:37,934 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T06:31:37,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,940 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,940 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:37,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:31:38,011 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:31:38,016 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:31:38,017 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:31:38,017 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:31:38,017 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T06:31:38,018 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:31:38,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70fc3167{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:31:38,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49dea66d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:31:38,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@278324d2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/java.io.tmpdir/jetty-localhost-46063-hadoop-hdfs-3_4_1-tests_jar-_-any-2135027954121489683/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T06:31:38,134 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2b3cd21a{HTTP/1.1, (http/1.1)}{localhost:46063} 2024-11-10T06:31:38,134 INFO [Time-limited test {}] server.Server(415): Started @147835ms 2024-11-10T06:31:38,147 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T06:31:38,215 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:31:38,218 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:31:38,219 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:31:38,219 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:31:38,219 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T06:31:38,220 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68390467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:31:38,220 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8bbb4f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:31:38,336 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3a93c2c3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/java.io.tmpdir/jetty-localhost-46249-hadoop-hdfs-3_4_1-tests_jar-_-any-7611977699207130778/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:38,336 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4181c46d{HTTP/1.1, (http/1.1)}{localhost:46249} 2024-11-10T06:31:38,337 INFO [Time-limited test {}] server.Server(415): Started @148037ms 2024-11-10T06:31:38,338 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T06:31:38,368 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-10T06:31:38,371 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-10T06:31:38,373 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-10T06:31:38,373 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-10T06:31:38,373 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-10T06:31:38,376 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5af08dcb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir/,AVAILABLE}
2024-11-10T06:31:38,376 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ce237cc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-10T06:31:38,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:31:38,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:31:38,431 WARN [Thread-1184 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data1/current/BP-794254838-172.17.0.2-1731220297963/current, will proceed with Du for space computation calculation,
2024-11-10T06:31:38,431 WARN [Thread-1185 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data2/current/BP-794254838-172.17.0.2-1731220297963/current, will proceed with Du for space computation calculation,
2024-11-10T06:31:38,453 WARN [Thread-1163 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1 2024-11-10T06:31:38,456 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6a864234bcc54ece with lease ID 0x87b769e5337cfd6: Processing first storage report for DS-2eadd24d-1464-49b5-8718-50614ae089fd from datanode DatanodeRegistration(127.0.0.1:35743, datanodeUuid=c04487f6-9a8d-4e08-a52b-3c677128c1e8, infoPort=44587, infoSecurePort=0, ipcPort=33369, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963) 2024-11-10T06:31:38,456 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6a864234bcc54ece with lease ID 0x87b769e5337cfd6: from storage DS-2eadd24d-1464-49b5-8718-50614ae089fd node DatanodeRegistration(127.0.0.1:35743, datanodeUuid=c04487f6-9a8d-4e08-a52b-3c677128c1e8, infoPort=44587, infoSecurePort=0, ipcPort=33369, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:38,456 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6a864234bcc54ece with lease ID 0x87b769e5337cfd6: Processing first storage report for DS-ae13d092-e83d-4983-aa6c-2307a1abce4f from datanode DatanodeRegistration(127.0.0.1:35743, datanodeUuid=c04487f6-9a8d-4e08-a52b-3c677128c1e8, infoPort=44587, infoSecurePort=0, ipcPort=33369, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963) 2024-11-10T06:31:38,456 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6a864234bcc54ece with lease ID 0x87b769e5337cfd6: from storage DS-ae13d092-e83d-4983-aa6c-2307a1abce4f node DatanodeRegistration(127.0.0.1:35743, datanodeUuid=c04487f6-9a8d-4e08-a52b-3c677128c1e8, infoPort=44587, infoSecurePort=0, ipcPort=33369, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:38,501 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1749af23{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/java.io.tmpdir/jetty-localhost-36851-hadoop-hdfs-3_4_1-tests_jar-_-any-6037607560884274551/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:38,502 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@73abd7bc{HTTP/1.1, (http/1.1)}{localhost:36851} 2024-11-10T06:31:38,502 INFO [Time-limited test {}] server.Server(415): Started @148203ms 2024-11-10T06:31:38,503 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
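Note: the two RecoverLeaseFSUtils warnings above fail because the WAL writer's DFSClient has already been shut down ("Filesystem closed") by the time the reflective isFileClosed probe runs. As a rough illustration of the pattern those utilities drive (this is a sketch, not HBase's actual RecoverLeaseFSUtils code; the namenode address is taken from the log, the WAL path and sleep interval are placeholders), lease recovery against HDFS is normally a recoverLease call followed by polling isFileClosed on a still-open DistributedFileSystem:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40625"), conf);
    // Hypothetical WAL file path, for illustration only.
    Path wal = new Path("/user/jenkins/test-data/example-wal");

    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Ask the namenode to start lease recovery; true means the file is already closed.
      boolean recovered = dfs.recoverLease(wal);
      // Poll until the namenode reports the file closed. If the client backing `dfs`
      // has already been closed (as in the warnings above), isFileClosed throws
      // "java.io.IOException: Filesystem closed" instead of returning.
      while (!recovered) {
        Thread.sleep(1000);
        recovered = dfs.isFileClosed(wal);
      }
    }
    fs.close();
  }
}
```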
2024-11-10T06:31:38,595 WARN [Thread-1210 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data3/current/BP-794254838-172.17.0.2-1731220297963/current, will proceed with Du for space computation calculation, 2024-11-10T06:31:38,595 WARN [Thread-1211 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data4/current/BP-794254838-172.17.0.2-1731220297963/current, will proceed with Du for space computation calculation, 2024-11-10T06:31:38,612 WARN [Thread-1199 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T06:31:38,614 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc7c627b7ccb77d78 with lease ID 0x87b769e5337cfd7: Processing first storage report for DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d from datanode DatanodeRegistration(127.0.0.1:37235, datanodeUuid=3b149b59-c879-4029-a86f-ab369f901f70, infoPort=36137, infoSecurePort=0, ipcPort=40565, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963) 2024-11-10T06:31:38,614 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc7c627b7ccb77d78 with lease ID 0x87b769e5337cfd7: from storage DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d node DatanodeRegistration(127.0.0.1:37235, datanodeUuid=3b149b59-c879-4029-a86f-ab369f901f70, infoPort=36137, infoSecurePort=0, ipcPort=40565, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:38,614 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc7c627b7ccb77d78 with lease ID 0x87b769e5337cfd7: Processing first storage report for DS-552a8a2a-2aa9-485d-834b-d9a8ab8a8342 from datanode DatanodeRegistration(127.0.0.1:37235, datanodeUuid=3b149b59-c879-4029-a86f-ab369f901f70, infoPort=36137, infoSecurePort=0, ipcPort=40565, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963) 2024-11-10T06:31:38,614 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc7c627b7ccb77d78 with lease ID 0x87b769e5337cfd7: from storage DS-552a8a2a-2aa9-485d-834b-d9a8ab8a8342 node DatanodeRegistration(127.0.0.1:37235, datanodeUuid=3b149b59-c879-4029-a86f-ab369f901f70, infoPort=36137, infoSecurePort=0, ipcPort=40565, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:38,627 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f 2024-11-10T06:31:38,629 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/zookeeper_0, clientPort=55423, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T06:31:38,630 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55423 2024-11-10T06:31:38,630 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:31:38,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:31:38,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741825_1001 (size=7) 2024-11-10T06:31:38,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37235 is added to blk_1073741825_1001 (size=7) 2024-11-10T06:31:38,642 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815 with version=8 2024-11-10T06:31:38,642 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/hbase-staging 2024-11-10T06:31:38,644 INFO [Time-limited test {}] client.ConnectionUtils(128): master/4999977c7e1b:0 server-side Connection retries=45 2024-11-10T06:31:38,644 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:31:38,644 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T06:31:38,644 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T06:31:38,644 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:31:38,644 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T06:31:38,644 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T06:31:38,644 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T06:31:38,645 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35461 2024-11-10T06:31:38,646 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35461 connecting to ZooKeeper ensemble=127.0.0.1:55423 2024-11-10T06:31:38,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:354610x0, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T06:31:38,653 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35461-0x10190e0d0ed0000 connected 2024-11-10T06:31:38,669 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:31:38,671 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:31:38,672 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:31:38,672 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815, hbase.cluster.distributed=false 2024-11-10T06:31:38,674 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T06:31:38,674 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35461 2024-11-10T06:31:38,674 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35461 2024-11-10T06:31:38,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35461 2024-11-10T06:31:38,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35461 2024-11-10T06:31:38,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35461 2024-11-10T06:31:38,691 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4999977c7e1b:0 server-side Connection retries=45 2024-11-10T06:31:38,691 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:31:38,691 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T06:31:38,691 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T06:31:38,691 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:31:38,691 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T06:31:38,691 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T06:31:38,691 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T06:31:38,692 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43987 2024-11-10T06:31:38,693 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43987 connecting to ZooKeeper ensemble=127.0.0.1:55423 2024-11-10T06:31:38,693 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:31:38,695 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:31:38,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:439870x0, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T06:31:38,700 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:439870x0, quorum=127.0.0.1:55423, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:31:38,700 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43987-0x10190e0d0ed0001 connected 2024-11-10T06:31:38,700 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T06:31:38,701 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T06:31:38,701 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T06:31:38,702 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T06:31:38,703 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43987 2024-11-10T06:31:38,703 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43987 2024-11-10T06:31:38,703 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43987 2024-11-10T06:31:38,705 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43987 2024-11-10T06:31:38,707 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43987 2024-11-10T06:31:38,720 
DEBUG [M:0;4999977c7e1b:35461 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;4999977c7e1b:35461 2024-11-10T06:31:38,720 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/4999977c7e1b,35461,1731220298644 2024-11-10T06:31:38,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:31:38,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:31:38,722 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/4999977c7e1b,35461,1731220298644 2024-11-10T06:31:38,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T06:31:38,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:38,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:38,725 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T06:31:38,725 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/4999977c7e1b,35461,1731220298644 from backup master directory 2024-11-10T06:31:38,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/4999977c7e1b,35461,1731220298644 2024-11-10T06:31:38,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:31:38,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:31:38,728 WARN [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
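Note: the ZKUtil "Set watcher on znode that does not yet exist" lines correspond to exists-watches: the client registers interest in a path such as /hbase/running before the node is created and later receives the NodeCreated events seen further down. A minimal sketch of that pattern with the plain ZooKeeper client follows; the ensemble address and session timeout are taken from the log, while the latch and the watched path choice are illustrative and this is not the ZKWatcher/ZKUtil implementation itself.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ExistsWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);
    // Connect to the test ensemble; 127.0.0.1:55423 is the MiniZooKeeperCluster client port above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55423", 30000, event -> {
      // Fires for connection state changes and for watched znode events.
      if (event.getType() == Watcher.Event.EventType.NodeCreated
          && "/hbase/running".equals(event.getPath())) {
        created.countDown();
      }
    });
    // Setting an exists-watch on a znode that does not yet exist is legal;
    // the default watcher above is triggered once /hbase/running is created.
    zk.exists("/hbase/running", true);
    created.await();
    zk.close();
  }
}
```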
2024-11-10T06:31:38,728 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=4999977c7e1b,35461,1731220298644 2024-11-10T06:31:38,732 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/hbase.id] with ID: feeffcdd-00bc-4705-aa6e-341e71090e60 2024-11-10T06:31:38,732 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/.tmp/hbase.id 2024-11-10T06:31:38,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37235 is added to blk_1073741826_1002 (size=42) 2024-11-10T06:31:38,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741826_1002 (size=42) 2024-11-10T06:31:38,739 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/.tmp/hbase.id]:[hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/hbase.id] 2024-11-10T06:31:38,752 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:31:38,752 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T06:31:38,754 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
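Note: the FSUtils lines above record the cluster ID being written to a .tmp location and then moved into place, the usual way to get an effectively atomic "create this small metadata file" step on HDFS. A hedged sketch of that write-then-rename pattern is below; the root directory is a placeholder, the file content is simplified to a bare UUID string, and this is not the actual FSUtils code.

```java
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/user/jenkins/test-data/example-root"); // placeholder root dir
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");

    // Write the ID to a temporary file first...
    String clusterId = UUID.randomUUID().toString();
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // ...then rename it into its final location, so readers never observe a half-written file.
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
    }
  }
}
```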
2024-11-10T06:31:38,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:38,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:38,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37235 is added to blk_1073741827_1003 (size=196) 2024-11-10T06:31:38,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741827_1003 (size=196) 2024-11-10T06:31:38,766 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T06:31:38,767 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T06:31:38,767 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:31:38,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37235 is added to blk_1073741828_1004 (size=1189) 2024-11-10T06:31:38,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741828_1004 (size=1189) 2024-11-10T06:31:39,177 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store 2024-11-10T06:31:39,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741829_1005 (size=34) 2024-11-10T06:31:39,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37235 is added to blk_1073741829_1005 (size=34) 2024-11-10T06:31:39,188 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:31:39,188 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T06:31:39,188 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:31:39,188 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:31:39,188 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T06:31:39,188 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:31:39,188 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
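Note: the 'master:store' descriptor dumped above (four column families, each with its own VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE settings) is the kind of definition normally assembled with the HBase client descriptor builders. The sketch below expresses just the 'info' family from that dump in builder form; the table name is a placeholder, and this is not the code HBase itself uses to define master:store.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family settings from the log: VERSIONS=3, IN_MEMORY=true,
    // BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8 KB.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .build();
    // "default:example_store" is a placeholder name, not the real master:store table.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("default", "example_store"))
        .setColumnFamily(info)
        .build();
  }
}
```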
2024-11-10T06:31:39,188 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731220299188Disabling compacts and flushes for region at 1731220299188Disabling writes for close at 1731220299188Writing region close event to WAL at 1731220299188Closed at 1731220299188 2024-11-10T06:31:39,189 WARN [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/.initializing 2024-11-10T06:31:39,190 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/WALs/4999977c7e1b,35461,1731220298644 2024-11-10T06:31:39,192 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C35461%2C1731220298644, suffix=, logDir=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/WALs/4999977c7e1b,35461,1731220298644, archiveDir=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/oldWALs, maxLogs=10 2024-11-10T06:31:39,193 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C35461%2C1731220298644.1731220299192 2024-11-10T06:31:39,197 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/WALs/4999977c7e1b,35461,1731220298644/4999977c7e1b%2C35461%2C1731220298644.1731220299192 2024-11-10T06:31:39,202 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44587:44587),(127.0.0.1/127.0.0.1:36137:36137)] 2024-11-10T06:31:39,205 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:31:39,205 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:31:39,205 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:31:39,205 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:31:39,207 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:31:39,208 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T06:31:39,208 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:31:39,209 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:31:39,209 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:31:39,210 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T06:31:39,210 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:31:39,210 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:31:39,210 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:31:39,212 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T06:31:39,212 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:31:39,212 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:31:39,212 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:31:39,213 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T06:31:39,213 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:31:39,214 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:31:39,214 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:31:39,214 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:31:39,215 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:31:39,216 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:31:39,216 DEBUG [master/4999977c7e1b:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:31:39,217 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T06:31:39,218 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:31:39,220 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:31:39,221 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727549, jitterRate=-0.07487456500530243}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T06:31:39,221 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731220299205Initializing all the Stores at 1731220299206 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220299206Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220299206Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220299206Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220299206Cleaning up temporary data from old regions at 1731220299216 (+10 ms)Region opened successfully at 1731220299221 (+5 ms) 2024-11-10T06:31:39,221 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T06:31:39,225 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d38db6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4999977c7e1b/172.17.0.2:0 2024-11-10T06:31:39,226 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T06:31:39,226 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T06:31:39,226 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T06:31:39,226 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T06:31:39,226 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-10T06:31:39,227 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-10T06:31:39,227 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T06:31:39,231 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T06:31:39,232 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T06:31:39,233 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T06:31:39,234 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T06:31:39,234 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T06:31:39,236 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T06:31:39,236 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T06:31:39,240 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T06:31:39,241 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T06:31:39,242 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T06:31:39,244 DEBUG 
[master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T06:31:39,246 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T06:31:39,247 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T06:31:39,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T06:31:39,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T06:31:39,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:39,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:39,250 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=4999977c7e1b,35461,1731220298644, sessionid=0x10190e0d0ed0000, setting cluster-up flag (Was=false) 2024-11-10T06:31:39,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:39,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:39,259 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T06:31:39,261 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4999977c7e1b,35461,1731220298644 2024-11-10T06:31:39,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:39,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:39,272 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T06:31:39,273 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4999977c7e1b,35461,1731220298644 2024-11-10T06:31:39,274 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T06:31:39,276 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T06:31:39,276 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T06:31:39,276 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-10T06:31:39,277 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 4999977c7e1b,35461,1731220298644 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T06:31:39,278 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:31:39,278 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:31:39,278 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:31:39,278 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:31:39,278 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/4999977c7e1b:0, corePoolSize=10, maxPoolSize=10 2024-11-10T06:31:39,278 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:31:39,278 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/4999977c7e1b:0, corePoolSize=2, maxPoolSize=2 2024-11-10T06:31:39,278 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/4999977c7e1b:0, corePoolSize=1, 
maxPoolSize=1 2024-11-10T06:31:39,280 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731220329280 2024-11-10T06:31:39,280 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T06:31:39,280 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T06:31:39,280 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T06:31:39,280 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T06:31:39,280 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T06:31:39,280 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T06:31:39,280 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,280 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:31:39,281 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T06:31:39,281 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T06:31:39,281 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T06:31:39,281 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T06:31:39,282 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:31:39,282 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T06:31:39,284 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T06:31:39,284 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T06:31:39,285 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220299284,5,FailOnTimeoutGroup] 2024-11-10T06:31:39,286 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220299285,5,FailOnTimeoutGroup] 2024-11-10T06:31:39,286 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,286 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T06:31:39,286 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,286 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-10T06:31:39,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37235 is added to blk_1073741831_1007 (size=1321) 2024-11-10T06:31:39,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741831_1007 (size=1321) 2024-11-10T06:31:39,293 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T06:31:39,293 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815 2024-11-10T06:31:39,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741832_1008 (size=32) 2024-11-10T06:31:39,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37235 is added to blk_1073741832_1008 (size=32) 2024-11-10T06:31:39,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:31:39,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T06:31:39,307 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T06:31:39,307 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:31:39,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:31:39,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T06:31:39,309 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T06:31:39,309 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:31:39,309 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(746): ClusterId : feeffcdd-00bc-4705-aa6e-341e71090e60 2024-11-10T06:31:39,309 DEBUG [RS:0;4999977c7e1b:43987 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T06:31:39,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:31:39,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T06:31:39,311 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T06:31:39,311 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:31:39,312 DEBUG [RS:0;4999977c7e1b:43987 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T06:31:39,312 DEBUG [RS:0;4999977c7e1b:43987 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T06:31:39,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:31:39,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T06:31:39,313 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T06:31:39,313 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:31:39,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:31:39,314 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T06:31:39,314 DEBUG [RS:0;4999977c7e1b:43987 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T06:31:39,314 DEBUG [RS:0;4999977c7e1b:43987 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e7b7b8e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4999977c7e1b/172.17.0.2:0 2024-11-10T06:31:39,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740 2024-11-10T06:31:39,315 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740 2024-11-10T06:31:39,316 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T06:31:39,316 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T06:31:39,317 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T06:31:39,318 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T06:31:39,320 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:31:39,320 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696766, jitterRate=-0.11401697993278503}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T06:31:39,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731220299304Initializing all the Stores at 1731220299305 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220299305Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220299305Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220299305Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220299305Cleaning up temporary data from old regions at 1731220299316 (+11 ms)Region opened successfully at 1731220299321 (+5 ms) 2024-11-10T06:31:39,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T06:31:39,321 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T06:31:39,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on 
hbase:meta,,1.1588230740 2024-11-10T06:31:39,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T06:31:39,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T06:31:39,322 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T06:31:39,322 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731220299321Disabling compacts and flushes for region at 1731220299321Disabling writes for close at 1731220299321Writing region close event to WAL at 1731220299322 (+1 ms)Closed at 1731220299322 2024-11-10T06:31:39,323 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:31:39,323 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T06:31:39,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T06:31:39,325 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T06:31:39,326 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T06:31:39,333 DEBUG [RS:0;4999977c7e1b:43987 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;4999977c7e1b:43987 2024-11-10T06:31:39,334 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T06:31:39,334 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T06:31:39,334 DEBUG [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-10T06:31:39,335 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(2659): reportForDuty to master=4999977c7e1b,35461,1731220298644 with port=43987, startcode=1731220298690 2024-11-10T06:31:39,335 DEBUG [RS:0;4999977c7e1b:43987 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T06:31:39,337 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48749, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T06:31:39,337 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35461 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4999977c7e1b,43987,1731220298690 2024-11-10T06:31:39,338 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35461 {}] master.ServerManager(517): Registering regionserver=4999977c7e1b,43987,1731220298690 2024-11-10T06:31:39,339 DEBUG [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815 2024-11-10T06:31:39,339 DEBUG [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44261 2024-11-10T06:31:39,339 DEBUG [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T06:31:39,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T06:31:39,342 DEBUG [RS:0;4999977c7e1b:43987 {}] zookeeper.ZKUtil(111): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4999977c7e1b,43987,1731220298690 2024-11-10T06:31:39,342 WARN [RS:0;4999977c7e1b:43987 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T06:31:39,342 INFO [RS:0;4999977c7e1b:43987 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:31:39,343 DEBUG [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690 2024-11-10T06:31:39,343 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4999977c7e1b,43987,1731220298690] 2024-11-10T06:31:39,346 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T06:31:39,347 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T06:31:39,348 INFO [RS:0;4999977c7e1b:43987 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T06:31:39,348 INFO [RS:0;4999977c7e1b:43987 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-10T06:31:39,348 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T06:31:39,349 INFO [RS:0;4999977c7e1b:43987 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T06:31:39,349 INFO [RS:0;4999977c7e1b:43987 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,349 DEBUG [RS:0;4999977c7e1b:43987 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:31:39,349 DEBUG [RS:0;4999977c7e1b:43987 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:31:39,349 DEBUG [RS:0;4999977c7e1b:43987 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:31:39,349 DEBUG [RS:0;4999977c7e1b:43987 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:31:39,349 DEBUG [RS:0;4999977c7e1b:43987 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:31:39,349 DEBUG [RS:0;4999977c7e1b:43987 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4999977c7e1b:0, corePoolSize=2, maxPoolSize=2 2024-11-10T06:31:39,349 DEBUG [RS:0;4999977c7e1b:43987 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:31:39,349 DEBUG [RS:0;4999977c7e1b:43987 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:31:39,349 DEBUG [RS:0;4999977c7e1b:43987 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:31:39,350 DEBUG [RS:0;4999977c7e1b:43987 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:31:39,350 DEBUG [RS:0;4999977c7e1b:43987 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:31:39,350 DEBUG [RS:0;4999977c7e1b:43987 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:31:39,350 DEBUG [RS:0;4999977c7e1b:43987 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:31:39,350 DEBUG [RS:0;4999977c7e1b:43987 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:31:39,351 INFO [RS:0;4999977c7e1b:43987 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-10T06:31:39,351 INFO [RS:0;4999977c7e1b:43987 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,351 INFO [RS:0;4999977c7e1b:43987 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,351 INFO [RS:0;4999977c7e1b:43987 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,351 INFO [RS:0;4999977c7e1b:43987 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,351 INFO [RS:0;4999977c7e1b:43987 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,43987,1731220298690-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T06:31:39,366 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T06:31:39,366 INFO [RS:0;4999977c7e1b:43987 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,43987,1731220298690-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,366 INFO [RS:0;4999977c7e1b:43987 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,366 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.Replication(171): 4999977c7e1b,43987,1731220298690 started 2024-11-10T06:31:39,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:39,380 INFO [RS:0;4999977c7e1b:43987 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,380 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(1482): Serving as 4999977c7e1b,43987,1731220298690, RpcServer on 4999977c7e1b/172.17.0.2:43987, sessionid=0x10190e0d0ed0001 2024-11-10T06:31:39,380 DEBUG [RS:0;4999977c7e1b:43987 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T06:31:39,380 DEBUG [RS:0;4999977c7e1b:43987 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4999977c7e1b,43987,1731220298690 2024-11-10T06:31:39,380 DEBUG [RS:0;4999977c7e1b:43987 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,43987,1731220298690' 2024-11-10T06:31:39,380 DEBUG [RS:0;4999977c7e1b:43987 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T06:31:39,381 DEBUG [RS:0;4999977c7e1b:43987 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T06:31:39,381 DEBUG [RS:0;4999977c7e1b:43987 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T06:31:39,381 DEBUG [RS:0;4999977c7e1b:43987 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T06:31:39,381 DEBUG [RS:0;4999977c7e1b:43987 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4999977c7e1b,43987,1731220298690 2024-11-10T06:31:39,381 DEBUG [RS:0;4999977c7e1b:43987 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,43987,1731220298690' 2024-11-10T06:31:39,381 DEBUG [RS:0;4999977c7e1b:43987 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T06:31:39,381 DEBUG [RS:0;4999977c7e1b:43987 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T06:31:39,382 DEBUG [RS:0;4999977c7e1b:43987 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T06:31:39,382 INFO [RS:0;4999977c7e1b:43987 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T06:31:39,382 INFO [RS:0;4999977c7e1b:43987 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T06:31:39,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:39,476 WARN [4999977c7e1b:35461 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-10T06:31:39,484 INFO [RS:0;4999977c7e1b:43987 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C43987%2C1731220298690, suffix=, logDir=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690, archiveDir=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/oldWALs, maxLogs=32 2024-11-10T06:31:39,485 INFO [RS:0;4999977c7e1b:43987 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43987%2C1731220298690.1731220299484 2024-11-10T06:31:39,491 INFO [RS:0;4999977c7e1b:43987 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220299484 2024-11-10T06:31:39,492 DEBUG [RS:0;4999977c7e1b:43987 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44587:44587),(127.0.0.1/127.0.0.1:36137:36137)] 2024-11-10T06:31:39,726 DEBUG [4999977c7e1b:35461 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-10T06:31:39,727 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=4999977c7e1b,43987,1731220298690 2024-11-10T06:31:39,729 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4999977c7e1b,43987,1731220298690, state=OPENING 2024-11-10T06:31:39,730 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T06:31:39,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:39,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:31:39,732 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T06:31:39,732 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:31:39,732 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:31:39,732 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=4999977c7e1b,43987,1731220298690}] 2024-11-10T06:31:39,885 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T06:31:39,888 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35235, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T06:31:39,891 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T06:31:39,891 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:31:39,893 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C43987%2C1731220298690.meta, suffix=.meta, logDir=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690, archiveDir=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/oldWALs, maxLogs=32 2024-11-10T06:31:39,894 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43987%2C1731220298690.meta.1731220299894.meta 2024-11-10T06:31:39,899 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.meta.1731220299894.meta 2024-11-10T06:31:39,899 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36137:36137),(127.0.0.1/127.0.0.1:44587:44587)] 2024-11-10T06:31:39,900 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:31:39,900 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T06:31:39,900 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T06:31:39,901 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-10T06:31:39,901 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T06:31:39,901 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:31:39,901 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T06:31:39,901 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T06:31:39,902 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T06:31:39,903 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T06:31:39,903 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:31:39,904 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:31:39,904 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T06:31:39,905 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T06:31:39,905 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:31:39,905 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:31:39,905 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T06:31:39,906 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T06:31:39,906 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:31:39,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:31:39,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T06:31:39,907 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T06:31:39,907 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:31:39,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-10T06:31:39,908 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T06:31:39,909 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740 2024-11-10T06:31:39,910 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740 2024-11-10T06:31:39,911 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T06:31:39,911 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T06:31:39,911 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T06:31:39,912 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T06:31:39,913 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740343, jitterRate=-0.05860640108585358}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T06:31:39,913 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T06:31:39,914 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731220299901Writing region info on filesystem at 1731220299901Initializing all the Stores at 1731220299902 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220299902Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220299902Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220299902Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220299902Cleaning up temporary data from old regions at 1731220299911 (+9 ms)Running coprocessor post-open hooks at 1731220299913 (+2 ms)Region opened successfully at 1731220299914 (+1 ms) 2024-11-10T06:31:39,915 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731220299885 2024-11-10T06:31:39,917 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T06:31:39,917 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T06:31:39,918 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=4999977c7e1b,43987,1731220298690 2024-11-10T06:31:39,919 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4999977c7e1b,43987,1731220298690, state=OPEN 2024-11-10T06:31:39,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T06:31:39,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T06:31:39,924 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=4999977c7e1b,43987,1731220298690 2024-11-10T06:31:39,924 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:31:39,924 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:31:39,927 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T06:31:39,927 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=4999977c7e1b,43987,1731220298690 in 192 msec 2024-11-10T06:31:39,929 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T06:31:39,929 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 604 msec 2024-11-10T06:31:39,930 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:31:39,930 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T06:31:39,932 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T06:31:39,932 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4999977c7e1b,43987,1731220298690, seqNum=-1] 2024-11-10T06:31:39,932 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T06:31:39,933 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45873, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T06:31:39,938 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 662 msec 2024-11-10T06:31:39,939 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731220299939, completionTime=-1 2024-11-10T06:31:39,939 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-10T06:31:39,939 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-10T06:31:39,940 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-10T06:31:39,940 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731220359940 2024-11-10T06:31:39,940 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731220419940 2024-11-10T06:31:39,940 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-10T06:31:39,941 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,35461,1731220298644-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,941 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,35461,1731220298644-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,941 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,35461,1731220298644-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,941 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-4999977c7e1b:35461, period=300000, unit=MILLISECONDS is enabled. 
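The hbase:meta open journal above reports FlushLargeStoresPolicy{flushSizeLowerBound=16777216}: since the meta descriptor sets no hbase.hregion.percolumnfamilyflush.size.lower.bound, the policy falls back to the region memstore flush heap size divided by the number of column families, which for meta's four families (info, ns, rep_barrier, table) is the "16.0 M" printed in the log. A minimal sketch of that fallback arithmetic, with illustrative names rather than the actual FlushLargeStoresPolicy code:

    // Illustrative only: mirrors the fallback described by the log message
    // "using region.getMemStoreFlushHeapSize/# of families (...) instead."
    static long perFamilyFlushLowerBound(long memStoreFlushHeapSize, int familyCount) {
        return memStoreFlushHeapSize / familyCount;
    }
    // e.g. perFamilyFlushLowerBound(67108864L, 4) == 16777216
    // (16.0 M per family, assuming a 64 MB region flush size in this run)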
2024-11-10T06:31:39,941 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,941 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T06:31:39,942 DEBUG [master/4999977c7e1b:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T06:31:39,944 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.216sec 2024-11-10T06:31:39,944 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T06:31:39,944 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T06:31:39,944 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T06:31:39,944 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-10T06:31:39,944 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T06:31:39,945 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,35461,1731220298644-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T06:31:39,945 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,35461,1731220298644-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T06:31:39,947 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T06:31:39,947 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T06:31:39,947 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,35461,1731220298644-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
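With the master reporting "completed initialization" and InitMetaProcedure having created the 'default' and 'hbase' namespaces, a client can connect and confirm what the startup above produced. A short, hedged sketch using the standard HBase client API (not taken from the test source; conf is assumed to be the minicluster's Configuration, and the fragment runs in a method that may throw IOException):

    // Assumes: org.apache.hadoop.hbase.NamespaceDescriptor,
    //          org.apache.hadoop.hbase.client.{Admin, Connection, ConnectionFactory}
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
        // Expect the two namespaces created by InitMetaProcedure: default, hbase
        for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName());
        }
    }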
2024-11-10T06:31:40,010 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@dc0d855, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:31:40,010 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 4999977c7e1b,35461,-1 for getting cluster id 2024-11-10T06:31:40,010 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T06:31:40,012 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'feeffcdd-00bc-4705-aa6e-341e71090e60' 2024-11-10T06:31:40,012 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T06:31:40,012 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "feeffcdd-00bc-4705-aa6e-341e71090e60" 2024-11-10T06:31:40,013 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cab59bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:31:40,013 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4999977c7e1b,35461,-1] 2024-11-10T06:31:40,013 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T06:31:40,013 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:31:40,015 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40914, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T06:31:40,016 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d408466, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:31:40,016 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T06:31:40,017 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4999977c7e1b,43987,1731220298690, seqNum=-1] 2024-11-10T06:31:40,017 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T06:31:40,020 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52136, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T06:31:40,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=4999977c7e1b,35461,1731220298644 2024-11-10T06:31:40,022 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:31:40,025 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-10T06:31:40,025 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-10T06:31:40,026 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-10T06:31:40,026 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T06:31:40,027 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 4999977c7e1b,35461,1731220298644 2024-11-10T06:31:40,027 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@447e4cea 2024-11-10T06:31:40,027 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T06:31:40,028 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40928, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T06:31:40,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35461 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-10T06:31:40,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35461 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
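The two TableDescriptorChecker warnings are expected in a log-rolling test: the region max file size and memstore flush size are deliberately tiny so flushes and WAL rolls happen quickly. A hedged sketch of how such limits could be set on the test Configuration (the numbers mirror the warnings above; the exact mechanism TestLogRolling uses may differ):

    // Hypothetical setup, not the actual test code; assumes
    // org.apache.hadoop.conf.Configuration and org.apache.hadoop.hbase.HBaseConfiguration.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.max.filesize", 786432L);       // "MAX_FILESIZE ... (786432) is too small"
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);  // "MEMSTORE_FLUSHSIZE ... (8192) is too small"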
2024-11-10T06:31:40,029 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35461 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T06:31:40,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35461 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-10T06:31:40,032 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T06:31:40,032 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:31:40,032 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35461 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-10T06:31:40,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35461 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T06:31:40,033 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-10T06:31:40,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741835_1011 (size=395) 2024-11-10T06:31:40,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37235 is added to blk_1073741835_1011 (size=395) 2024-11-10T06:31:40,042 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => db672abcf0d9b1980083110ed63ae26d, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815 2024-11-10T06:31:40,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35743 is added to blk_1073741836_1012 (size=78) 2024-11-10T06:31:40,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37235 is added to blk_1073741836_1012 (size=78) 2024-11-10T06:31:40,049 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:31:40,049 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing db672abcf0d9b1980083110ed63ae26d, disabling compactions & flushes 2024-11-10T06:31:40,049 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d. 2024-11-10T06:31:40,049 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d. 2024-11-10T06:31:40,049 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d. after waiting 0 ms 2024-11-10T06:31:40,049 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d. 2024-11-10T06:31:40,049 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d. 2024-11-10T06:31:40,049 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for db672abcf0d9b1980083110ed63ae26d: Waiting for close lock at 1731220300049Disabling compacts and flushes for region at 1731220300049Disabling writes for close at 1731220300049Writing region close event to WAL at 1731220300049Closed at 1731220300049 2024-11-10T06:31:40,051 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T06:31:40,051 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731220300051"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731220300051"}]},"ts":"1731220300051"} 2024-11-10T06:31:40,053 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
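The create request logged by HMaster$4 above carries a descriptor with a single 'info' family (VERSIONS => '1', BLOOMFILTER => 'ROW'). A hedged client-side equivalent using the standard Admin API, shown as a sketch rather than the TestLogRolling source (admin is assumed to come from an open Connection):

    // Sketch only: a descriptor equivalent to the one in the create request above.
    // Assumes org.apache.hadoop.hbase.TableName, org.apache.hadoop.hbase.client.*,
    // and org.apache.hadoop.hbase.util.Bytes.
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)                  // VERSIONS => '1'
            .build())
        .build();
    admin.createTable(desc);                    // drives the CreateTableProcedure (pid=4) seen above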
2024-11-10T06:31:40,055 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T06:31:40,055 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731220300055"}]},"ts":"1731220300055"} 2024-11-10T06:31:40,057 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-10T06:31:40,057 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=db672abcf0d9b1980083110ed63ae26d, ASSIGN}] 2024-11-10T06:31:40,059 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=db672abcf0d9b1980083110ed63ae26d, ASSIGN 2024-11-10T06:31:40,060 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=db672abcf0d9b1980083110ed63ae26d, ASSIGN; state=OFFLINE, location=4999977c7e1b,43987,1731220298690; forceNewPlan=false, retain=false 2024-11-10T06:31:40,211 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=db672abcf0d9b1980083110ed63ae26d, regionState=OPENING, regionLocation=4999977c7e1b,43987,1731220298690 2024-11-10T06:31:40,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=db672abcf0d9b1980083110ed63ae26d, ASSIGN because future has completed 2024-11-10T06:31:40,214 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure db672abcf0d9b1980083110ed63ae26d, server=4999977c7e1b,43987,1731220298690}] 2024-11-10T06:31:40,371 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d. 
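Once the ASSIGN subprocedure hands the region to the region server and the AssignRegionHandler open that starts here completes (its journal follows), a client can confirm where the region landed through the region locator. A hedged sketch using the standard client API (conn is an open Connection; this is not the test's own verification code):

    // Sketch: print the encoded region name and hosting server for the new table.
    // Assumes org.apache.hadoop.hbase.{TableName, HRegionLocation} and
    // org.apache.hadoop.hbase.client.RegionLocator.
    try (RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))) {
        for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
        }
    }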
2024-11-10T06:31:40,371 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => db672abcf0d9b1980083110ed63ae26d, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d.', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:31:40,371 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart db672abcf0d9b1980083110ed63ae26d 2024-11-10T06:31:40,371 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:31:40,371 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for db672abcf0d9b1980083110ed63ae26d 2024-11-10T06:31:40,372 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for db672abcf0d9b1980083110ed63ae26d 2024-11-10T06:31:40,373 INFO [StoreOpener-db672abcf0d9b1980083110ed63ae26d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region db672abcf0d9b1980083110ed63ae26d 2024-11-10T06:31:40,374 INFO [StoreOpener-db672abcf0d9b1980083110ed63ae26d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region db672abcf0d9b1980083110ed63ae26d columnFamilyName info 2024-11-10T06:31:40,374 DEBUG [StoreOpener-db672abcf0d9b1980083110ed63ae26d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:31:40,375 INFO [StoreOpener-db672abcf0d9b1980083110ed63ae26d-1 {}] regionserver.HStore(327): Store=db672abcf0d9b1980083110ed63ae26d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:31:40,375 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for db672abcf0d9b1980083110ed63ae26d 2024-11-10T06:31:40,376 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/default/TestLogRolling-testLogRollOnPipelineRestart/db672abcf0d9b1980083110ed63ae26d 2024-11-10T06:31:40,376 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/default/TestLogRolling-testLogRollOnPipelineRestart/db672abcf0d9b1980083110ed63ae26d 2024-11-10T06:31:40,376 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for db672abcf0d9b1980083110ed63ae26d 2024-11-10T06:31:40,376 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for db672abcf0d9b1980083110ed63ae26d 2024-11-10T06:31:40,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:31:40,378 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for db672abcf0d9b1980083110ed63ae26d 2024-11-10T06:31:40,380 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/default/TestLogRolling-testLogRollOnPipelineRestart/db672abcf0d9b1980083110ed63ae26d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:31:40,381 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened db672abcf0d9b1980083110ed63ae26d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873141, jitterRate=0.110256627202034}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T06:31:40,381 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for db672abcf0d9b1980083110ed63ae26d 2024-11-10T06:31:40,381 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for db672abcf0d9b1980083110ed63ae26d: Running coprocessor pre-open hook at 1731220300372Writing region info on filesystem at 1731220300372Initializing all the Stores at 1731220300373 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220300373Cleaning up temporary data from old regions at 1731220300377 (+4 ms)Running coprocessor post-open hooks at 1731220300381 (+4 ms)Region opened successfully at 1731220300381 2024-11-10T06:31:40,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:40,383 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d., pid=6, masterSystemTime=1731220300367 2024-11-10T06:31:40,385 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d. 2024-11-10T06:31:40,386 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d. 2024-11-10T06:31:40,386 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=db672abcf0d9b1980083110ed63ae26d, regionState=OPEN, openSeqNum=2, regionLocation=4999977c7e1b,43987,1731220298690 2024-11-10T06:31:40,389 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure db672abcf0d9b1980083110ed63ae26d, server=4999977c7e1b,43987,1731220298690 because future has completed 2024-11-10T06:31:40,393 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T06:31:40,393 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure db672abcf0d9b1980083110ed63ae26d, server=4999977c7e1b,43987,1731220298690 in 176 msec 2024-11-10T06:31:40,396 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T06:31:40,396 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=db672abcf0d9b1980083110ed63ae26d, ASSIGN in 336 msec 2024-11-10T06:31:40,397 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T06:31:40,397 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731220300397"}]},"ts":"1731220300397"} 2024-11-10T06:31:40,399 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-10T06:31:40,400 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T06:31:40,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 371 msec 2024-11-10T06:31:41,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:41,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:42,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:42,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:43,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:43,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:44,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:44,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:31:45,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:31:45,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-10T06:31:45,403 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-10T06:31:45,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T06:31:45,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T06:31:45,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T06:31:45,416 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T06:31:45,416 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T06:31:45,416 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T06:31:45,419 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T06:31:45,419 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T06:31:45,419 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T06:31:45,422 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T06:31:45,428 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-10T06:31:45,428 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart'
2024-11-10T06:31:45,498 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-10T06:31:45,498 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-10T06:31:45,499 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-11-10T06:31:45,499 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer
2024-11-10T06:31:45,500 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-10T06:31:45,500 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-10T06:31:46,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
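The repeated "Failed invocation" WARNs above (and the identical ones that follow) all come from the same place: while closing the old WAL writers, the Close-WAL-Writer thread polls whether the abandoned WAL file is already closed, and that probe is dispatched reflectively into DistributedFileSystem.isFileClosed. Because the underlying DFSClient has already been shut down, every probe fails with java.io.IOException: Filesystem closed and the poll retries roughly once per second. A minimal, illustrative sketch of that polling pattern follows; the class name, timeout handling and the one-second interval are assumptions, not the RecoverLeaseFSUtils implementation.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative sketch only: poll FileSystem#isFileClosed via reflection, the way lease
    // recovery utilities probe it on Hadoop versions where the method may not be available.
    public final class LeaseClosePoller {

      /** Returns true once the file is reported closed, false if the probe is unavailable or keeps failing. */
      static boolean waitUntilClosed(FileSystem fs, Path wal, long timeoutMs) throws InterruptedException {
        Method isFileClosed;
        try {
          // DistributedFileSystem exposes isFileClosed(Path); look it up reflectively.
          isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
        } catch (NoSuchMethodException e) {
          return false; // Probe not supported by this FileSystem implementation.
        }
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          try {
            if ((Boolean) isFileClosed.invoke(fs, wal)) {
              return true;
            }
          } catch (InvocationTargetException e) {
            // Mirrors the log above: if the underlying DFSClient is already closed, the cause is
            // an IOException("Filesystem closed") and the poll simply retries.
            if (!(e.getCause() instanceof IOException)) {
              return false;
            }
          } catch (IllegalAccessException e) {
            return false;
          }
          Thread.sleep(1000); // Assumed retry interval, roughly matching the once-per-second WARNs.
        }
        return false;
      }

      private LeaseClosePoller() {
      }
    }

In this log the probe can never succeed, so the same InvocationTargetException keeps repeating for as long as the close task retries.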
2024-11-10T06:31:46,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-10T06:31:47,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-10T06:31:47,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-10T06:31:48,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-10T06:31:48,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-10T06:31:49,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-10T06:31:49,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-10T06:31:50,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35461 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-10T06:31:50,076 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-10T06:31:50,076 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart,, for max=2147483647 with caching=100
2024-11-10T06:31:50,079 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-10T06:31:50,080 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d.
2024-11-10T06:31:50,083 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d., hostname=4999977c7e1b,43987,1731220298690, seqNum=2]
2024-11-10T06:31:50,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-10T06:31:50,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-10T06:31:51,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-10T06:31:51,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-11-10T06:31:52,085 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220299484
2024-11-10T06:31:52,086 WARN [ResponseProcessor for block BP-794254838-172.17.0.2-1731220297963:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-794254838-172.17.0.2-1731220297963:blk_1073741834_1010
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-10T06:31:52,086 WARN [ResponseProcessor for block BP-794254838-172.17.0.2-1731220297963:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-794254838-172.17.0.2-1731220297963:blk_1073741833_1009
java.io.IOException: Bad response ERROR for BP-794254838-172.17.0.2-1731220297963:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:37235,DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-10T06:31:52,086 WARN [ResponseProcessor for block BP-794254838-172.17.0.2-1731220297963:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-794254838-172.17.0.2-1731220297963:blk_1073741830_1006
java.io.IOException: Bad response ERROR for BP-794254838-172.17.0.2-1731220297963:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:37235,DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-10T06:31:52,087 WARN [DataStreamer for file /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/WALs/4999977c7e1b,35461,1731220298644/4999977c7e1b%2C35461%2C1731220298644.1731220299192 block BP-794254838-172.17.0.2-1731220297963:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-794254838-172.17.0.2-1731220297963:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35743,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK], DatanodeInfoWithStorage[127.0.0.1:37235,DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37235,DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d,DISK]) is bad.
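The records above mark the pipeline failure that testLogRollOnPipelineRestart is designed to provoke: the ack (ResponseProcessor) threads for the open WAL blocks hit an unexpected EOF or a Bad response, and DataStreamer begins error recovery after declaring one datanode in the pipeline bad. A test can trigger exactly this by bouncing a datanode under a mini DFS cluster; the sketch below assumes MiniDFSCluster's stopDataNode/restartDataNode test helpers and is not the exact code of this test.

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Illustrative sketch (assumed MiniDFSCluster test helpers): stop one datanode so in-flight
    // WAL writers see pipeline errors, then bring it back and wait for the cluster to settle.
    final class PipelineBouncer {

      static void bounceDataNode(MiniDFSCluster dfsCluster, int dnIndex) throws Exception {
        // Stopping the datanode makes in-flight writers see "Bad response ERROR" / unexpected EOF
        // on their ack stream, as in the ResponseProcessor and DataStreamer warnings above.
        MiniDFSCluster.DataNodeProperties dn = dfsCluster.stopDataNode(dnIndex);

        // Restart the same datanode; writers must either recover the pipeline or, for a WAL,
        // roll to a brand new file on the healthy pipeline.
        dfsCluster.restartDataNode(dn);
        dfsCluster.waitActive();
      }

      private PipelineBouncer() {
      }
    }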
2024-11-10T06:31:52,087 WARN [DataStreamer for file /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220299484 block BP-794254838-172.17.0.2-1731220297963:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-794254838-172.17.0.2-1731220297963:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35743,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK], DatanodeInfoWithStorage[127.0.0.1:37235,DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37235,DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d,DISK]) is bad.
2024-11-10T06:31:52,087 WARN [DataStreamer for file /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.meta.1731220299894.meta block BP-794254838-172.17.0.2-1731220297963:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-794254838-172.17.0.2-1731220297963:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37235,DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d,DISK], DatanodeInfoWithStorage[127.0.0.1:35743,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37235,DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d,DISK]) is bad.
2024-11-10T06:31:52,087 WARN [PacketResponder: BP-794254838-172.17.0.2-1731220297963:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37235] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
    at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
    at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
    at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
    at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
    at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
    at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T06:31:52,087 WARN [PacketResponder: BP-794254838-172.17.0.2-1731220297963:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37235] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
2024-11-10T06:31:52,087 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1806857954_22 at /127.0.0.1:54050 [Receiving block BP-794254838-172.17.0.2-1731220297963:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37235:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54050 dst: /127.0.0.1:37235
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T06:31:52,088 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1806857954_22 at /127.0.0.1:60246 [Receiving block BP-794254838-172.17.0.2-1731220297963:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35743:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60246 dst: /127.0.0.1:35743
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T06:31:52,088 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842138937_22 at /127.0.0.1:34226 [Receiving block BP-794254838-172.17.0.2-1731220297963:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35743:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34226 dst: /127.0.0.1:35743
java.io.IOException: Premature EOF from inputStream
2024-11-10T06:31:52,088 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842138937_22 at /127.0.0.1:39848 [Receiving block BP-794254838-172.17.0.2-1731220297963:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37235:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39848 dst: /127.0.0.1:37235
java.nio.channels.ClosedChannelException: null
2024-11-10T06:31:52,088 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1806857954_22 at /127.0.0.1:34258 [Receiving block BP-794254838-172.17.0.2-1731220297963:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35743:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34258 dst: /127.0.0.1:35743
java.io.IOException: Premature EOF from inputStream
2024-11-10T06:31:52,088 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1806857954_22 at /127.0.0.1:39888 [Receiving block BP-794254838-172.17.0.2-1731220297963:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37235:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39888 dst: /127.0.0.1:37235
java.nio.channels.ClosedChannelException: null
2024-11-10T06:31:52,093 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1749af23{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-10T06:31:52,094 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@73abd7bc{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-10T06:31:52,094 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-10T06:31:52,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ce237cc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-10T06:31:52,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5af08dcb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir/,STOPPED}
2024-11-10T06:31:52,095 WARN [BP-794254838-172.17.0.2-1731220297963 heartbeating to localhost/127.0.0.1:44261 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-10T06:31:52,095 WARN [BP-794254838-172.17.0.2-1731220297963 heartbeating to localhost/127.0.0.1:44261 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-794254838-172.17.0.2-1731220297963 (Datanode Uuid 3b149b59-c879-4029-a86f-ab369f901f70) service to localhost/127.0.0.1:44261
2024-11-10T06:31:52,096 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-10T06:31:52,096 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-10T06:31:52,096 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data3/current/BP-794254838-172.17.0.2-1731220297963 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T06:31:52,096 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data4/current/BP-794254838-172.17.0.2-1731220297963 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T06:31:52,097 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-10T06:31:52,104 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-10T06:31:52,107 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-10T06:31:52,108 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-10T06:31:52,108 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-10T06:31:52,108 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-10T06:31:52,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bd9bf17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir/,AVAILABLE}
2024-11-10T06:31:52,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e976720{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-10T06:31:52,228 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b49c453{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/java.io.tmpdir/jetty-localhost-37347-hadoop-hdfs-3_4_1-tests_jar-_-any-17786271494771801970/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-10T06:31:52,228 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6618ad7a{HTTP/1.1, (http/1.1)}{localhost:37347}
2024-11-10T06:31:52,228 INFO [Time-limited test {}] server.Server(415): Started @161929ms
2024-11-10T06:31:52,230 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-10T06:31:52,254 WARN [ResponseProcessor for block BP-794254838-172.17.0.2-1731220297963:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-794254838-172.17.0.2-1731220297963:blk_1073741834_1014
java.io.EOFException: Unexpected EOF while trying to read response from server
2024-11-10T06:31:52,254 WARN [ResponseProcessor for block BP-794254838-172.17.0.2-1731220297963:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-794254838-172.17.0.2-1731220297963:blk_1073741830_1013
java.io.EOFException: Unexpected EOF while trying to read response from server
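At this point the datanode's web server has been brought back up (jetty "Started @161929ms") and the writers above are discovering that their old pipelines are gone, so the test can now expect the region server to roll its WAL to a new file, as hinted by the earlier log.getCurrentFileName() line. A hedged sketch of forcing and observing such a roll through the public client API is shown below; the wiring of admin, fs, serverName and walRootDir is assumed, not taken from the test.

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;

    // Illustrative sketch: ask a region server to roll its WAL and list the files under its
    // WALs directory. The directory layout mirrors the hdfs:// paths in this log.
    final class WalRollCheck {

      static void rollAndList(Admin admin, FileSystem fs, ServerName serverName, Path walRootDir)
          throws Exception {
        // Admin.rollWALWriter asks the region server to close its current WAL and start a new one.
        admin.rollWALWriter(serverName);

        // WAL files live under <root>/WALs/<server-name>/, e.g. .../WALs/4999977c7e1b,43987,1731220298690/.
        Path serverWalDir = new Path(new Path(walRootDir, "WALs"), serverName.toString());
        for (FileStatus status : fs.listStatus(serverWalDir)) {
          System.out.println("WAL file: " + status.getPath());
        }
      }

      private WalRollCheck() {
      }
    }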
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:52,254 WARN [ResponseProcessor for block BP-794254838-172.17.0.2-1731220297963:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-794254838-172.17.0.2-1731220297963:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:52,255 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1806857954_22 at /127.0.0.1:51222 [Receiving block BP-794254838-172.17.0.2-1731220297963:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35743:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51222 dst: /127.0.0.1:35743 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T06:31:52,255 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842138937_22 at /127.0.0.1:51212 [Receiving block BP-794254838-172.17.0.2-1731220297963:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35743:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51212 dst: /127.0.0.1:35743 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:52,255 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1806857954_22 at /127.0.0.1:51224 [Receiving block BP-794254838-172.17.0.2-1731220297963:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35743:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51224 dst: /127.0.0.1:35743 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:52,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3a93c2c3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:52,258 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4181c46d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:31:52,258 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:31:52,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8bbb4f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:31:52,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68390467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir/,STOPPED} 2024-11-10T06:31:52,259 WARN [BP-794254838-172.17.0.2-1731220297963 heartbeating to localhost/127.0.0.1:44261 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:31:52,259 WARN [BP-794254838-172.17.0.2-1731220297963 heartbeating to localhost/127.0.0.1:44261 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-794254838-172.17.0.2-1731220297963 (Datanode Uuid c04487f6-9a8d-4e08-a52b-3c677128c1e8) service to localhost/127.0.0.1:44261 2024-11-10T06:31:52,260 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T06:31:52,260 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:31:52,260 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data1/current/BP-794254838-172.17.0.2-1731220297963 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:52,260 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data2/current/BP-794254838-172.17.0.2-1731220297963 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:52,261 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:31:52,269 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:31:52,272 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:31:52,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:31:52,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:31:52,273 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T06:31:52,273 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@92a7e51{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:31:52,274 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4075a25a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:31:52,331 WARN [Thread-1334 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T06:31:52,333 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3a9430f70af390a0 with lease ID 0x87b769e5337cfd8: from storage DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d node DatanodeRegistration(127.0.0.1:38393, datanodeUuid=3b149b59-c879-4029-a86f-ab369f901f70, infoPort=37221, infoSecurePort=0, ipcPort=33357, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:52,334 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3a9430f70af390a0 with lease ID 0x87b769e5337cfd8: from storage DS-552a8a2a-2aa9-485d-834b-d9a8ab8a8342 node DatanodeRegistration(127.0.0.1:38393, datanodeUuid=3b149b59-c879-4029-a86f-ab369f901f70, infoPort=37221, infoSecurePort=0, ipcPort=33357, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:52,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:31:52,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:52,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3367bf77{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/java.io.tmpdir/jetty-localhost-33345-hadoop-hdfs-3_4_1-tests_jar-_-any-6587989381725467593/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:52,395 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@179bdcf2{HTTP/1.1, (http/1.1)}{localhost:33345} 2024-11-10T06:31:52,395 INFO [Time-limited test {}] server.Server(415): Started @162096ms 2024-11-10T06:31:52,397 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T06:31:52,481 WARN [Thread-1365 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T06:31:52,484 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6b7323e2efd17ad with lease ID 0x87b769e5337cfd9: from storage DS-2eadd24d-1464-49b5-8718-50614ae089fd node DatanodeRegistration(127.0.0.1:36705, datanodeUuid=c04487f6-9a8d-4e08-a52b-3c677128c1e8, infoPort=39905, infoSecurePort=0, ipcPort=44357, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:52,484 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6b7323e2efd17ad with lease ID 0x87b769e5337cfd9: from storage DS-ae13d092-e83d-4983-aa6c-2307a1abce4f node DatanodeRegistration(127.0.0.1:36705, datanodeUuid=c04487f6-9a8d-4e08-a52b-3c677128c1e8, infoPort=39905, infoSecurePort=0, ipcPort=44357, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:53,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:31:53,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:53,415 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-10T06:31:53,417 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-10T06:31:53,419 ERROR [FSHLog-0-hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815-prefix:4999977c7e1b,43987,1731220298690 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35743,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T06:31:53,419 WARN [FSHLog-0-hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815-prefix:4999977c7e1b,43987,1731220298690 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35743,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:53,419 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 4999977c7e1b%2C43987%2C1731220298690:(num 1731220299484) roll requested 2024-11-10T06:31:53,419 INFO [regionserver/4999977c7e1b:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43987%2C1731220298690.1731220313419 2024-11-10T06:31:53,425 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220299484 newFile=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 2024-11-10T06:31:53,426 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:53,426 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:53,426 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:53,426 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:53,426 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:53,426 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220299484 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 2024-11-10T06:31:53,427 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35743,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T06:31:53,427 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35743,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:53,427 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220299484 2024-11-10T06:31:53,427 WARN [IPC Server handler 0 on default port 44261 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220299484 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-10T06:31:53,427 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220299484 after 0ms 2024-11-10T06:31:53,428 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39905:39905),(127.0.0.1/127.0.0.1:37221:37221)] 2024-11-10T06:31:53,428 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220299484 is not closed yet, will try archiving it next time 2024-11-10T06:31:53,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_1073741833_1017 (size=1632) 2024-11-10T06:31:54,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:54,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:55,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:55,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:55,432 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-10T06:31:56,334 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-10T06:31:56,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:56,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:57,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:31:57,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:57,428 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220299484 after 4001ms 2024-11-10T06:31:57,435 WARN [ResponseProcessor for block BP-794254838-172.17.0.2-1731220297963:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-794254838-172.17.0.2-1731220297963:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T06:31:57,435 WARN [DataStreamer for file /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 block BP-794254838-172.17.0.2-1731220297963:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-794254838-172.17.0.2-1731220297963:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36705,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK], DatanodeInfoWithStorage[127.0.0.1:38393,DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36705,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK]) is bad. 2024-11-10T06:31:57,436 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1806857954_22 at /127.0.0.1:41598 [Receiving block BP-794254838-172.17.0.2-1731220297963:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41598 dst: /127.0.0.1:36705 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T06:31:57,436 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1806857954_22 at /127.0.0.1:36712 [Receiving block BP-794254838-172.17.0.2-1731220297963:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:38393:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36712 dst: /127.0.0.1:38393 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:31:57,438 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3367bf77{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:57,438 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@179bdcf2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:31:57,438 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:31:57,438 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4075a25a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:31:57,439 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@92a7e51{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir/,STOPPED} 2024-11-10T06:31:57,439 WARN [BP-794254838-172.17.0.2-1731220297963 heartbeating to localhost/127.0.0.1:44261 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:31:57,439 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T06:31:57,440 WARN [BP-794254838-172.17.0.2-1731220297963 heartbeating to localhost/127.0.0.1:44261 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-794254838-172.17.0.2-1731220297963 (Datanode Uuid c04487f6-9a8d-4e08-a52b-3c677128c1e8) service to localhost/127.0.0.1:44261 2024-11-10T06:31:57,440 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:31:57,440 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data1/current/BP-794254838-172.17.0.2-1731220297963 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:57,440 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data2/current/BP-794254838-172.17.0.2-1731220297963 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:57,441 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:31:57,450 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:31:57,453 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:31:57,453 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:31:57,453 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:31:57,453 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T06:31:57,454 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18bcfccc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:31:57,454 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68f73adf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:31:57,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d222848{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/java.io.tmpdir/jetty-localhost-38927-hadoop-hdfs-3_4_1-tests_jar-_-any-10252111929968749127/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:57,570 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@473433d5{HTTP/1.1, 
(http/1.1)}{localhost:38927} 2024-11-10T06:31:57,571 INFO [Time-limited test {}] server.Server(415): Started @167271ms 2024-11-10T06:31:57,572 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T06:31:57,590 WARN [ResponseProcessor for block BP-794254838-172.17.0.2-1731220297963:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-794254838-172.17.0.2-1731220297963:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:57,590 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1806857954_22 at /127.0.0.1:36718 [Receiving block BP-794254838-172.17.0.2-1731220297963:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:38393:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36718 dst: /127.0.0.1:38393 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T06:31:57,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b49c453{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:57,593 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6618ad7a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:31:57,593 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:31:57,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e976720{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:31:57,594 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bd9bf17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir/,STOPPED} 2024-11-10T06:31:57,595 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-10T06:31:57,595 WARN [BP-794254838-172.17.0.2-1731220297963 heartbeating to localhost/127.0.0.1:44261 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:31:57,595 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:31:57,595 WARN [BP-794254838-172.17.0.2-1731220297963 heartbeating to localhost/127.0.0.1:44261 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-794254838-172.17.0.2-1731220297963 (Datanode Uuid 3b149b59-c879-4029-a86f-ab369f901f70) service to localhost/127.0.0.1:44261 2024-11-10T06:31:57,597 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data3/current/BP-794254838-172.17.0.2-1731220297963 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:57,597 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data4/current/BP-794254838-172.17.0.2-1731220297963 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:31:57,597 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:31:57,607 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:31:57,610 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:31:57,611 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:31:57,611 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:31:57,611 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T06:31:57,611 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1eff1159{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:31:57,612 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57d815ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:31:57,666 WARN [Thread-1408 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T06:31:57,669 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8fc10d0ecf64a3e4 with lease ID 0x87b769e5337cfda: from storage DS-2eadd24d-1464-49b5-8718-50614ae089fd node DatanodeRegistration(127.0.0.1:42845, datanodeUuid=c04487f6-9a8d-4e08-a52b-3c677128c1e8, infoPort=43465, infoSecurePort=0, ipcPort=42541, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:57,669 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8fc10d0ecf64a3e4 with lease ID 0x87b769e5337cfda: from storage DS-ae13d092-e83d-4983-aa6c-2307a1abce4f node DatanodeRegistration(127.0.0.1:42845, datanodeUuid=c04487f6-9a8d-4e08-a52b-3c677128c1e8, infoPort=43465, infoSecurePort=0, ipcPort=42541, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:57,729 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@72532400{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/java.io.tmpdir/jetty-localhost-33899-hadoop-hdfs-3_4_1-tests_jar-_-any-739462519170100574/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:31:57,729 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b0a5107{HTTP/1.1, (http/1.1)}{localhost:33899} 2024-11-10T06:31:57,729 INFO [Time-limited test {}] server.Server(415): Started @167430ms 2024-11-10T06:31:57,731 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
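A note on the entries above: the Jetty/DataNode restart and subsequent "BLOCK* processReport" lines come from the mini DFS cluster bringing a previously stopped DataNode back online during the test. A minimal sketch of that stop/restart pattern, assuming the standard org.apache.hadoop.hdfs.MiniDFSCluster test helper (the cluster sizing and port handling here are illustrative, not the test's actual code):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class RestartDataNodeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Two DataNodes, matching the two-node write pipelines seen in this log.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();

    // Stop one DataNode but keep its storage directories; in-flight writers now hit
    // pipeline errors like the DataXceiver / EOF entries above.
    MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);

    // Bring it back on the same port and wait for it to re-register and send block
    // reports (the "BLOCK* processReport" entries that follow in the log).
    cluster.restartDataNode(dn, true);
    cluster.waitActive();

    cluster.shutdown();
  }
}
```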
2024-11-10T06:31:57,820 WARN [Thread-1439 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T06:31:57,823 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x30fed6fd0e49060f with lease ID 0x87b769e5337cfdb: from storage DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d node DatanodeRegistration(127.0.0.1:33329, datanodeUuid=3b149b59-c879-4029-a86f-ab369f901f70, infoPort=45095, infoSecurePort=0, ipcPort=46307, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:31:57,823 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x30fed6fd0e49060f with lease ID 0x87b769e5337cfdb: from storage DS-552a8a2a-2aa9-485d-834b-d9a8ab8a8342 node DatanodeRegistration(127.0.0.1:33329, datanodeUuid=3b149b59-c879-4029-a86f-ab369f901f70, infoPort=45095, infoSecurePort=0, ipcPort=46307, storageInfo=lv=-57;cid=testClusterID;nsid=1631496314;c=1731220297963), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T06:31:58,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:31:58,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:58,749 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-10T06:31:58,751 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-10T06:31:58,753 ERROR [FSHLog-0-hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815-prefix:4999977c7e1b,43987,1731220298690 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38393,DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
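The repeated RecoverLeaseFSUtils entries in this log ("Recover lease on dfs file", "Failed to recover lease, attempt=0", the isFileClosed invocation failures) all come from one polling pattern: ask the NameNode to recover the previous writer's lease, check whether the file has been closed, and retry after a pause. A minimal sketch of that pattern against the public HDFS client API follows; it is not the RecoverLeaseFSUtils implementation itself, and the method name, timeout, and pause are illustrative:

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  /**
   * Poll recoverLease()/isFileClosed() until the NameNode reports the WAL file closed.
   * Returns true once the lease is recovered, false if the timeout expires first.
   */
  public static boolean recoverLease(DistributedFileSystem dfs, Path walFile,
      long timeoutMs, long pauseMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // recoverLease() returns true when the file is already closed or recovery completed.
      if (dfs.recoverLease(walFile)) {
        return true;
      }
      // Otherwise the NameNode answers with "Lease recovery is in progress.
      // RecoveryId = ..." (as logged above), so poll isFileClosed() before retrying.
      if (dfs.isFileClosed(walFile)) {
        return true;
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }
}
```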
2024-11-10T06:31:58,753 WARN [FSHLog-0-hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815-prefix:4999977c7e1b,43987,1731220298690 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38393,DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:58,753 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 4999977c7e1b%2C43987%2C1731220298690:(num 1731220313419) roll requested 2024-11-10T06:31:58,753 INFO [regionserver/4999977c7e1b:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43987%2C1731220298690.1731220318753 2024-11-10T06:31:58,759 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 newFile=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220318753 2024-11-10T06:31:58,759 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:58,759 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:58,759 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:58,760 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:58,760 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:31:58,760 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220318753 2024-11-10T06:31:58,760 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38393,DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
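The "roll requested" / "Rolled WAL ... new WAL ..." sequence above is the region server's log roller reacting to the failed append by switching to a new WAL file. A roll can also be requested explicitly from a client; a hedged sketch using the HBase Admin API, assuming an HBase 2.x+ client (the connection setup and the choice to roll every region server are illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask each region server to close its current WAL writer and open a new one,
      // producing the same "Rolled WAL ... new WAL ..." lines seen in this log.
      for (ServerName rs : admin.getRegionServers()) {
        admin.rollWALWriter(rs);
      }
    }
  }
}
```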
2024-11-10T06:31:58,760 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38393,DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:31:58,760 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 2024-11-10T06:31:58,761 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45095:45095),(127.0.0.1/127.0.0.1:43465:43465)] 2024-11-10T06:31:58,761 WARN [IPC Server handler 2 on default port 44261 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-10T06:31:58,761 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 is not closed yet, will try archiving it next time 2024-11-10T06:31:58,761 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 after 1ms 2024-11-10T06:31:59,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:31:59,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:00,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:00,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:00,762 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43987%2C1731220298690.1731220320762 2024-11-10T06:32:00,769 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220318753 newFile=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 2024-11-10T06:32:00,769 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:00,769 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:00,769 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:00,769 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:00,769 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:00,770 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220318753 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 2024-11-10T06:32:00,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42845 is added to blk_1073741838_1019 (size=1264) 2024-11-10T06:32:00,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33329 is added to blk_1073741838_1019 (size=1264) 2024-11-10T06:32:00,772 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43465:43465),(127.0.0.1/127.0.0.1:45095:45095)] 2024-11-10T06:32:00,772 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 is not closed yet, will try archiving it next time 2024-11-10T06:32:00,772 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220318753 is not closed yet, will try archiving it next time 2024-11-10T06:32:00,772 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 is not closed yet, will try archiving it next time 2024-11-10T06:32:00,772 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220299484 2024-11-10T06:32:00,772 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220299484 2024-11-10T06:32:00,773 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220299484 after 1ms 2024-11-10T06:32:00,773 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220299484 2024-11-10T06:32:00,785 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731220300382/Put/vlen=218/seqid=0] 2024-11-10T06:32:00,785 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731220310084/Put/vlen=1045/seqid=0] 2024-11-10T06:32:00,785 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220299484 2024-11-10T06:32:00,785 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 2024-11-10T06:32:00,785 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 2024-11-10T06:32:00,786 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 after 1ms 2024-11-10T06:32:00,786 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 2024-11-10T06:32:00,789 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731220313419/Put/vlen=1045/seqid=0] 2024-11-10T06:32:00,789 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731220315433/Put/vlen=1045/seqid=0] 2024-11-10T06:32:00,789 DEBUG 
[Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 2024-11-10T06:32:00,789 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220318753 2024-11-10T06:32:00,789 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220318753 2024-11-10T06:32:00,790 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220318753 after 1ms 2024-11-10T06:32:00,790 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220318753 2024-11-10T06:32:00,793 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731220318752/Put/vlen=1045/seqid=0] 2024-11-10T06:32:00,793 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 2024-11-10T06:32:00,793 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 2024-11-10T06:32:00,793 WARN [IPC Server handler 0 on default port 44261 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-10T06:32:00,793 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 after 0ms 2024-11-10T06:32:01,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:01,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:01,671 WARN [ResponseProcessor for block BP-794254838-172.17.0.2-1731220297963:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-794254838-172.17.0.2-1731220297963:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:32:01,671 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842138937_22 at /127.0.0.1:50356 [Receiving block BP-794254838-172.17.0.2-1731220297963:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42845:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50356 dst: /127.0.0.1:42845 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:42845 remote=/127.0.0.1:50356]. Total timeout mills is 60000, 59098 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:32:01,671 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842138937_22 at /127.0.0.1:37026 [Receiving block BP-794254838-172.17.0.2-1731220297963:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:33329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37026 dst: /127.0.0.1:33329 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:32:01,671 WARN [DataStreamer for file /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 block BP-794254838-172.17.0.2-1731220297963:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-794254838-172.17.0.2-1731220297963:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42845,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK], DatanodeInfoWithStorage[127.0.0.1:33329,DS-cb7ce07e-c63c-4f64-9d36-1e09e417590d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42845,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK]) is bad. 
2024-11-10T06:32:01,672 WARN [DataStreamer for file /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 block BP-794254838-172.17.0.2-1731220297963:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-794254838-172.17.0.2-1731220297963:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T06:32:01,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42845 is added to blk_1073741839_1022 (size=85) 2024-11-10T06:32:01,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33329 is added to blk_1073741839_1022 (size=85) 2024-11-10T06:32:02,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:02,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:02,670 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-10T06:32:02,762 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220313419 after 4002ms 2024-11-10T06:32:03,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:03,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:04,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:04,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:04,794 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 after 4001ms 2024-11-10T06:32:04,794 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 2024-11-10T06:32:04,798 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 2024-11-10T06:32:04,798 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-10T06:32:04,799 ERROR [FSHLog-0-hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815-prefix:4999977c7e1b,43987,1731220298690.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35743,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:32:04,799 WARN [FSHLog-0-hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815-prefix:4999977c7e1b,43987,1731220298690.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35743,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T06:32:04,799 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 4999977c7e1b%2C43987%2C1731220298690.meta:.meta(num 1731220299894) roll requested 2024-11-10T06:32:04,799 INFO [regionserver/4999977c7e1b:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43987%2C1731220298690.meta.1731220324799.meta 2024-11-10T06:32:04,804 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:04,804 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:04,804 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:04,805 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:04,805 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:04,805 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.meta.1731220299894.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.meta.1731220324799.meta 2024-11-10T06:32:04,805 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35743,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:32:04,805 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35743,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T06:32:04,805 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.meta.1731220299894.meta 2024-11-10T06:32:04,806 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45095:45095),(127.0.0.1/127.0.0.1:43465:43465)] 2024-11-10T06:32:04,806 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.meta.1731220299894.meta is not closed yet, will try archiving it next time 2024-11-10T06:32:04,806 WARN [IPC Server handler 1 on default port 44261 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.meta.1731220299894.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-11-10T06:32:04,806 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.meta.1731220299894.meta after 1ms 2024-11-10T06:32:04,822 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/.tmp/info/2bd8abebbeb1423d9f537d1f793c06a9 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d./info:regioninfo/1731220300386/Put/seqid=0 2024-11-10T06:32:04,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33329 is added to blk_1073741841_1025 (size=7125) 2024-11-10T06:32:04,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42845 is added to blk_1073741841_1025 (size=7125) 2024-11-10T06:32:04,827 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/.tmp/info/2bd8abebbeb1423d9f537d1f793c06a9 2024-11-10T06:32:04,847 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/.tmp/ns/0d97edb1e3c04444ac78f848c39d0785 is 43, key is default/ns:d/1731220299934/Put/seqid=0 2024-11-10T06:32:04,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42845 is added to blk_1073741842_1026 (size=5153) 2024-11-10T06:32:04,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33329 is added to blk_1073741842_1026 (size=5153) 2024-11-10T06:32:04,853 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/.tmp/ns/0d97edb1e3c04444ac78f848c39d0785 2024-11-10T06:32:04,873 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/.tmp/table/5c5b0972d93a4545bd6dee690d2dd2d3 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731220300397/Put/seqid=0 2024-11-10T06:32:04,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42845 is added to blk_1073741843_1027 (size=5438) 2024-11-10T06:32:04,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33329 is added to blk_1073741843_1027 (size=5438) 2024-11-10T06:32:04,878 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/.tmp/table/5c5b0972d93a4545bd6dee690d2dd2d3 2024-11-10T06:32:04,884 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/.tmp/info/2bd8abebbeb1423d9f537d1f793c06a9 as hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/info/2bd8abebbeb1423d9f537d1f793c06a9 2024-11-10T06:32:04,889 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/info/2bd8abebbeb1423d9f537d1f793c06a9, entries=10, sequenceid=11, filesize=7.0 K 2024-11-10T06:32:04,890 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/.tmp/ns/0d97edb1e3c04444ac78f848c39d0785 as hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/ns/0d97edb1e3c04444ac78f848c39d0785 2024-11-10T06:32:04,895 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/ns/0d97edb1e3c04444ac78f848c39d0785, entries=2, sequenceid=11, filesize=5.0 K 2024-11-10T06:32:04,896 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/.tmp/table/5c5b0972d93a4545bd6dee690d2dd2d3 as hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/table/5c5b0972d93a4545bd6dee690d2dd2d3 2024-11-10T06:32:04,901 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/table/5c5b0972d93a4545bd6dee690d2dd2d3, entries=2, sequenceid=11, filesize=5.3 K 2024-11-10T06:32:04,902 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 104ms, sequenceid=11, compaction requested=false 2024-11-10T06:32:04,902 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-10T06:32:04,902 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing db672abcf0d9b1980083110ed63ae26d 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-10T06:32:04,903 ERROR [FSHLog-0-hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815-prefix:4999977c7e1b,43987,1731220298690 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-794254838-172.17.0.2-1731220297963:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:32:04,903 WARN [FSHLog-0-hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815-prefix:4999977c7e1b,43987,1731220298690 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-794254838-172.17.0.2-1731220297963:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T06:32:04,903 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 4999977c7e1b%2C43987%2C1731220298690:(num 1731220320762) roll requested 2024-11-10T06:32:04,903 INFO [regionserver/4999977c7e1b:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C43987%2C1731220298690.1731220324903 2024-11-10T06:32:04,908 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 newFile=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220324903 2024-11-10T06:32:04,908 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:04,908 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:04,908 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:04,908 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:04,908 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:04,909 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220324903 2024-11-10T06:32:04,909 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-794254838-172.17.0.2-1731220297963:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:32:04,909 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-794254838-172.17.0.2-1731220297963:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:32:04,909 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 2024-11-10T06:32:04,910 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 after 1ms 2024-11-10T06:32:04,913 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.1731220320762 to hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/oldWALs/4999977c7e1b%2C43987%2C1731220298690.1731220320762 2024-11-10T06:32:04,913 DEBUG [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45095:45095),(127.0.0.1/127.0.0.1:43465:43465)] 2024-11-10T06:32:04,928 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/default/TestLogRolling-testLogRollOnPipelineRestart/db672abcf0d9b1980083110ed63ae26d/.tmp/info/7455099203ac436ebb27885090c80f70 is 1080, key is row1002/info:/1731220310084/Put/seqid=0 2024-11-10T06:32:04,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42845 is added to blk_1073741845_1029 (size=9270) 2024-11-10T06:32:04,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33329 is added to blk_1073741845_1029 (size=9270) 2024-11-10T06:32:04,934 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/default/TestLogRolling-testLogRollOnPipelineRestart/db672abcf0d9b1980083110ed63ae26d/.tmp/info/7455099203ac436ebb27885090c80f70 2024-11-10T06:32:04,940 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/default/TestLogRolling-testLogRollOnPipelineRestart/db672abcf0d9b1980083110ed63ae26d/.tmp/info/7455099203ac436ebb27885090c80f70 as hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/default/TestLogRolling-testLogRollOnPipelineRestart/db672abcf0d9b1980083110ed63ae26d/info/7455099203ac436ebb27885090c80f70 2024-11-10T06:32:04,945 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/default/TestLogRolling-testLogRollOnPipelineRestart/db672abcf0d9b1980083110ed63ae26d/info/7455099203ac436ebb27885090c80f70, entries=4, sequenceid=8, filesize=9.1 K 2024-11-10T06:32:04,946 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for db672abcf0d9b1980083110ed63ae26d in 44ms, sequenceid=8, compaction requested=false 2024-11-10T06:32:04,946 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for db672abcf0d9b1980083110ed63ae26d: 2024-11-10T06:32:04,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T06:32:04,951 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T06:32:04,951 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:32:04,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:32:04,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:32:04,951 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-10T06:32:04,952 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T06:32:04,952 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1559457326, stopped=false 2024-11-10T06:32:04,952 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=4999977c7e1b,35461,1731220298644 2024-11-10T06:32:04,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T06:32:04,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T06:32:04,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:32:04,957 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T06:32:04,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:32:04,957 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
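Annotation: the JUnit call stack recorded above ends in AbstractTestLogRolling.tearDown() driving HBaseTestingUtil.shutdownMiniCluster(). As a rough illustration of that teardown path only, here is a minimal sketch; the class name LogRollingTeardownSketch, the testUtil field, and the setUp() half are assumptions for illustration, not the actual AbstractTestLogRolling source.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class LogRollingTeardownSketch {
  // Hypothetical scaffold; the real test class is AbstractTestLogRolling.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts an in-process HBase master, region server, mini DFS and mini ZooKeeper.
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Same call chain as the stack above:
    // shutdownMiniCluster -> shutdownMiniHBaseCluster -> cleanup -> closeConnection.
    testUtil.shutdownMiniCluster();
  }
}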
2024-11-10T06:32:04,957 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:32:04,958 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:32:04,958 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:32:04,958 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:32:04,958 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4999977c7e1b,43987,1731220298690' ***** 2024-11-10T06:32:04,958 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T06:32:04,958 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T06:32:04,958 INFO [RS:0;4999977c7e1b:43987 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T06:32:04,958 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T06:32:04,958 INFO [RS:0;4999977c7e1b:43987 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T06:32:04,958 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(3091): Received CLOSE for db672abcf0d9b1980083110ed63ae26d 2024-11-10T06:32:04,959 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(959): stopping server 4999977c7e1b,43987,1731220298690 2024-11-10T06:32:04,959 INFO [RS:0;4999977c7e1b:43987 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T06:32:04,959 INFO [RS:0;4999977c7e1b:43987 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;4999977c7e1b:43987. 
2024-11-10T06:32:04,959 DEBUG [RS:0;4999977c7e1b:43987 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:32:04,959 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing db672abcf0d9b1980083110ed63ae26d, disabling compactions & flushes 2024-11-10T06:32:04,959 DEBUG [RS:0;4999977c7e1b:43987 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:32:04,959 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d. 2024-11-10T06:32:04,959 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d. 2024-11-10T06:32:04,959 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T06:32:04,959 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d. after waiting 0 ms 2024-11-10T06:32:04,959 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T06:32:04,959 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d. 2024-11-10T06:32:04,959 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
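Annotation: the "Connection has been closed by ..." messages and call-stack DEBUG lines above are emitted from AsyncConnectionImpl.close(). For context, the same connection type is obtained and released from client code roughly as in this sketch (illustrative only; the server side in this log closes its cluster connection internally via HBaseServerBase.closeClusterConnection, not through client code like this).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncConnectionCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // createAsyncConnection(conf) returns a CompletableFuture<AsyncConnection>.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      // ... issue reads/writes via conn.getTable(...) here ...
    }
    // Leaving the try block calls AsyncConnectionImpl.close(), which logs the
    // "Connection has been closed by ..." message and the call stack seen above.
  }
}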
2024-11-10T06:32:04,959 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T06:32:04,959 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-10T06:32:04,959 DEBUG [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, db672abcf0d9b1980083110ed63ae26d=TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d.} 2024-11-10T06:32:04,959 DEBUG [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, db672abcf0d9b1980083110ed63ae26d 2024-11-10T06:32:04,959 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T06:32:04,960 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T06:32:04,960 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T06:32:04,960 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T06:32:04,960 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T06:32:04,963 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/default/TestLogRolling-testLogRollOnPipelineRestart/db672abcf0d9b1980083110ed63ae26d/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-10T06:32:04,964 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d. 2024-11-10T06:32:04,964 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-10T06:32:04,964 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for db672abcf0d9b1980083110ed63ae26d: Waiting for close lock at 1731220324959Running coprocessor pre-close hooks at 1731220324959Disabling compacts and flushes for region at 1731220324959Disabling writes for close at 1731220324959Writing region close event to WAL at 1731220324960 (+1 ms)Running coprocessor post-close hooks at 1731220324964 (+4 ms)Closed at 1731220324964 2024-11-10T06:32:04,964 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731220300029.db672abcf0d9b1980083110ed63ae26d. 
2024-11-10T06:32:04,965 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T06:32:04,965 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T06:32:04,965 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731220324959Running coprocessor pre-close hooks at 1731220324959Disabling compacts and flushes for region at 1731220324959Disabling writes for close at 1731220324960 (+1 ms)Writing region close event to WAL at 1731220324961 (+1 ms)Running coprocessor post-close hooks at 1731220324965 (+4 ms)Closed at 1731220324965 2024-11-10T06:32:04,965 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T06:32:05,160 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(976): stopping server 4999977c7e1b,43987,1731220298690; all regions closed. 2024-11-10T06:32:05,160 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:05,160 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:05,160 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:05,161 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:05,161 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:05,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42845 is added to blk_1073741840_1023 (size=825) 2024-11-10T06:32:05,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33329 is added to blk_1073741840_1023 (size=825) 2024-11-10T06:32:05,352 INFO [regionserver/4999977c7e1b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T06:32:05,369 INFO [regionserver/4999977c7e1b:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-10T06:32:05,369 INFO [regionserver/4999977c7e1b:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-10T06:32:05,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:05,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:05,498 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T06:32:05,499 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-10T06:32:05,499 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-10T06:32:06,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:06,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:07,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:07,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:08,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:08,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:08,627 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-10T06:32:08,807 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.meta.1731220299894.meta after 4002ms 2024-11-10T06:32:08,807 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/WALs/4999977c7e1b,43987,1731220298690/4999977c7e1b%2C43987%2C1731220298690.meta.1731220299894.meta to hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/oldWALs/4999977c7e1b%2C43987%2C1731220298690.meta.1731220299894.meta 2024-11-10T06:32:08,810 DEBUG [RS:0;4999977c7e1b:43987 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/oldWALs 2024-11-10T06:32:08,810 INFO [RS:0;4999977c7e1b:43987 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4999977c7e1b%2C43987%2C1731220298690.meta:.meta(num 1731220324799) 2024-11-10T06:32:08,810 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:08,810 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:08,810 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:08,811 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:08,811 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:08,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42845 is added to blk_1073741844_1028 (size=1162) 2024-11-10T06:32:08,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33329 is added to blk_1073741844_1028 (size=1162) 2024-11-10T06:32:08,817 DEBUG [RS:0;4999977c7e1b:43987 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/oldWALs 2024-11-10T06:32:08,817 INFO [RS:0;4999977c7e1b:43987 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4999977c7e1b%2C43987%2C1731220298690:(num 1731220324903) 2024-11-10T06:32:08,817 DEBUG [RS:0;4999977c7e1b:43987 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:32:08,817 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T06:32:08,817 INFO [RS:0;4999977c7e1b:43987 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T06:32:08,817 INFO [RS:0;4999977c7e1b:43987 {}] hbase.ChoreService(370): Chore service for: regionserver/4999977c7e1b:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, 
ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T06:32:08,817 INFO [RS:0;4999977c7e1b:43987 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T06:32:08,817 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T06:32:08,818 INFO [RS:0;4999977c7e1b:43987 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43987 2024-11-10T06:32:08,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T06:32:08,820 INFO [RS:0;4999977c7e1b:43987 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T06:32:08,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4999977c7e1b,43987,1731220298690 2024-11-10T06:32:08,821 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4999977c7e1b,43987,1731220298690] 2024-11-10T06:32:08,822 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4999977c7e1b,43987,1731220298690 already deleted, retry=false 2024-11-10T06:32:08,822 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4999977c7e1b,43987,1731220298690 expired; onlineServers=0 2024-11-10T06:32:08,823 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '4999977c7e1b,35461,1731220298644' ***** 2024-11-10T06:32:08,823 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T06:32:08,822 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
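Annotation: the repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed() between lease-recovery attempts against a DFSClient that has already been shut down, while the successful "Recovered lease, attempt=..." entries elsewhere in the log go through the same utility. That utility, named in those stack traces, is invoked roughly as in this sketch; the wrapper class and method names and the configuration/path wiring are assumptions for illustration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.RecoverLeaseFSUtils;

public class WalLeaseRecoverySketch {
  // Asks the NameNode to recover the HDFS lease on an old WAL so it can be read/archived.
  // Internally the utility retries and polls isFileClosed(), which is the call that throws
  // "Filesystem closed" above once the underlying DFSClient has been closed.
  public static void recoverWalLease(Configuration conf, Path walFile) throws IOException {
    FileSystem fs = walFile.getFileSystem(conf);
    RecoverLeaseFSUtils.recoverFileLease(fs, walFile, conf);
  }
}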
2024-11-10T06:32:08,823 INFO [M:0;4999977c7e1b:35461 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T06:32:08,823 INFO [M:0;4999977c7e1b:35461 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T06:32:08,823 DEBUG [M:0;4999977c7e1b:35461 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T06:32:08,823 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-10T06:32:08,823 DEBUG [M:0;4999977c7e1b:35461 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T06:32:08,823 DEBUG [master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220299284 {}] cleaner.HFileCleaner(306): Exit Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220299284,5,FailOnTimeoutGroup] 2024-11-10T06:32:08,823 DEBUG [master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220299285 {}] cleaner.HFileCleaner(306): Exit Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220299285,5,FailOnTimeoutGroup] 2024-11-10T06:32:08,823 INFO [M:0;4999977c7e1b:35461 {}] hbase.ChoreService(370): Chore service for: master/4999977c7e1b:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T06:32:08,823 INFO [M:0;4999977c7e1b:35461 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T06:32:08,824 DEBUG [M:0;4999977c7e1b:35461 {}] master.HMaster(1795): Stopping service threads 2024-11-10T06:32:08,824 INFO [M:0;4999977c7e1b:35461 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T06:32:08,824 INFO [M:0;4999977c7e1b:35461 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T06:32:08,824 INFO [M:0;4999977c7e1b:35461 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T06:32:08,824 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
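Annotation: the shutdown messages above ("Chore service for: master/... had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown") refer to HBase's ChoreService/ScheduledChore scheduler. A minimal sketch of how a chore is defined and scheduled follows; the chore name, pool prefix, and stopper wiring here are illustrative assumptions, not the master's actual chores.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    // Simple stopper; in a real server this is the HMaster/HRegionServer itself.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService choreService = new ChoreService("sketch");
    // Runs chore() every 300000 ms until the stopper is stopped or the service shuts down,
    // mirroring the "period=300000, unit=MILLISECONDS" entries logged above.
    ScheduledChore chore = new ScheduledChore("SketchStatisticsChore", stopper, 300000) {
      @Override protected void chore() {
        // periodic work goes here
      }
    };
    choreService.scheduleChore(chore);
    // On shutdown the service logs which chores it still had, as seen in the log above.
    choreService.shutdown();
  }
}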
2024-11-10T06:32:08,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T06:32:08,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:32:08,825 DEBUG [M:0;4999977c7e1b:35461 {}] zookeeper.ZKUtil(347): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T06:32:08,825 WARN [M:0;4999977c7e1b:35461 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T06:32:08,825 INFO [M:0;4999977c7e1b:35461 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/.lastflushedseqids 2024-11-10T06:32:08,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33329 is added to blk_1073741846_1030 (size=111) 2024-11-10T06:32:08,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42845 is added to blk_1073741846_1030 (size=111) 2024-11-10T06:32:08,831 INFO [M:0;4999977c7e1b:35461 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T06:32:08,831 INFO [M:0;4999977c7e1b:35461 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T06:32:08,831 DEBUG [M:0;4999977c7e1b:35461 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T06:32:08,831 INFO [M:0;4999977c7e1b:35461 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:32:08,831 DEBUG [M:0;4999977c7e1b:35461 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:32:08,831 DEBUG [M:0;4999977c7e1b:35461 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T06:32:08,831 DEBUG [M:0;4999977c7e1b:35461 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:32:08,831 INFO [M:0;4999977c7e1b:35461 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-10T06:32:08,832 ERROR [FSHLog-0-hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData-prefix:4999977c7e1b,35461,1731220298644 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35743,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:32:08,832 WARN [FSHLog-0-hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData-prefix:4999977c7e1b,35461,1731220298644 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35743,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:32:08,832 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 4999977c7e1b%2C35461%2C1731220298644:(num 1731220299192) roll requested 2024-11-10T06:32:08,832 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C35461%2C1731220298644.1731220328832 2024-11-10T06:32:08,837 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:08,837 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:08,837 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:08,837 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:08,837 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:08,837 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/WALs/4999977c7e1b,35461,1731220298644/4999977c7e1b%2C35461%2C1731220298644.1731220299192 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/WALs/4999977c7e1b,35461,1731220298644/4999977c7e1b%2C35461%2C1731220298644.1731220328832 2024-11-10T06:32:08,838 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35743,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:32:08,838 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35743,DS-2eadd24d-1464-49b5-8718-50614ae089fd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T06:32:08,838 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/WALs/4999977c7e1b,35461,1731220298644/4999977c7e1b%2C35461%2C1731220298644.1731220299192 2024-11-10T06:32:08,838 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43465:43465),(127.0.0.1/127.0.0.1:45095:45095)] 2024-11-10T06:32:08,838 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/WALs/4999977c7e1b,35461,1731220298644/4999977c7e1b%2C35461%2C1731220298644.1731220299192 is not closed yet, will try archiving it next time 2024-11-10T06:32:08,838 WARN [IPC Server handler 3 on default port 44261 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/WALs/4999977c7e1b,35461,1731220298644/4999977c7e1b%2C35461%2C1731220298644.1731220299192 has not been closed. Lease recovery is in progress. 
RecoveryId = 1032 for block blk_1073741830_1013 2024-11-10T06:32:08,839 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/WALs/4999977c7e1b,35461,1731220298644/4999977c7e1b%2C35461%2C1731220298644.1731220299192 after 1ms 2024-11-10T06:32:08,854 DEBUG [M:0;4999977c7e1b:35461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d8822ddbf2fd43d6bbe8ea4f20abcbe3 is 82, key is hbase:meta,,1/info:regioninfo/1731220299918/Put/seqid=0 2024-11-10T06:32:08,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33329 is added to blk_1073741848_1033 (size=5672) 2024-11-10T06:32:08,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42845 is added to blk_1073741848_1033 (size=5672) 2024-11-10T06:32:08,859 INFO [M:0;4999977c7e1b:35461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d8822ddbf2fd43d6bbe8ea4f20abcbe3 2024-11-10T06:32:08,878 DEBUG [M:0;4999977c7e1b:35461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/decc6c1b55e54aa18d90f4598a9a2ded is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731220300401/Put/seqid=0 2024-11-10T06:32:08,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33329 is added to blk_1073741849_1034 (size=6119) 2024-11-10T06:32:08,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42845 is added to blk_1073741849_1034 (size=6119) 2024-11-10T06:32:08,884 INFO [M:0;4999977c7e1b:35461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/decc6c1b55e54aa18d90f4598a9a2ded 2024-11-10T06:32:08,910 DEBUG [M:0;4999977c7e1b:35461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf807d135cd44b48ad253c98692323ea is 69, key is 4999977c7e1b,43987,1731220298690/rs:state/1731220299338/Put/seqid=0 2024-11-10T06:32:08,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33329 is added to blk_1073741850_1035 (size=5156) 2024-11-10T06:32:08,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42845 is added to blk_1073741850_1035 (size=5156) 2024-11-10T06:32:08,916 INFO [M:0;4999977c7e1b:35461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf807d135cd44b48ad253c98692323ea 2024-11-10T06:32:08,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:32:08,921 INFO [RS:0;4999977c7e1b:43987 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T06:32:08,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43987-0x10190e0d0ed0001, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:32:08,921 INFO [RS:0;4999977c7e1b:43987 {}] regionserver.HRegionServer(1031): Exiting; stopping=4999977c7e1b,43987,1731220298690; zookeeper connection closed. 2024-11-10T06:32:08,922 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@bf06cdc {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@bf06cdc 2024-11-10T06:32:08,922 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-10T06:32:08,945 DEBUG [M:0;4999977c7e1b:35461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/41f1eb15aef04b9ea838e7d2c4340824 is 52, key is load_balancer_on/state:d/1731220300024/Put/seqid=0 2024-11-10T06:32:08,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33329 is added to blk_1073741851_1036 (size=5056) 2024-11-10T06:32:08,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42845 is added to blk_1073741851_1036 (size=5056) 2024-11-10T06:32:08,951 INFO [M:0;4999977c7e1b:35461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/41f1eb15aef04b9ea838e7d2c4340824 2024-11-10T06:32:08,958 DEBUG [M:0;4999977c7e1b:35461 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d8822ddbf2fd43d6bbe8ea4f20abcbe3 as hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d8822ddbf2fd43d6bbe8ea4f20abcbe3 2024-11-10T06:32:08,963 INFO [M:0;4999977c7e1b:35461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d8822ddbf2fd43d6bbe8ea4f20abcbe3, entries=8, sequenceid=56, filesize=5.5 K 2024-11-10T06:32:08,964 DEBUG [M:0;4999977c7e1b:35461 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/decc6c1b55e54aa18d90f4598a9a2ded as 
hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/decc6c1b55e54aa18d90f4598a9a2ded 2024-11-10T06:32:08,969 INFO [M:0;4999977c7e1b:35461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/decc6c1b55e54aa18d90f4598a9a2ded, entries=6, sequenceid=56, filesize=6.0 K 2024-11-10T06:32:08,970 DEBUG [M:0;4999977c7e1b:35461 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf807d135cd44b48ad253c98692323ea as hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cf807d135cd44b48ad253c98692323ea 2024-11-10T06:32:08,976 INFO [M:0;4999977c7e1b:35461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cf807d135cd44b48ad253c98692323ea, entries=1, sequenceid=56, filesize=5.0 K 2024-11-10T06:32:08,977 DEBUG [M:0;4999977c7e1b:35461 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/41f1eb15aef04b9ea838e7d2c4340824 as hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/41f1eb15aef04b9ea838e7d2c4340824 2024-11-10T06:32:08,982 INFO [M:0;4999977c7e1b:35461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/41f1eb15aef04b9ea838e7d2c4340824, entries=1, sequenceid=56, filesize=4.9 K 2024-11-10T06:32:08,983 INFO [M:0;4999977c7e1b:35461 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=56, compaction requested=false 2024-11-10T06:32:08,986 INFO [M:0;4999977c7e1b:35461 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:32:08,986 DEBUG [M:0;4999977c7e1b:35461 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731220328831Disabling compacts and flushes for region at 1731220328831Disabling writes for close at 1731220328831Obtaining lock to block concurrent updates at 1731220328831Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731220328831Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731220328832 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731220328838 (+6 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731220328839 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731220328853 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731220328853Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731220328864 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731220328878 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731220328878Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731220328888 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731220328909 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731220328909Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731220328923 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731220328944 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731220328944Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4212143b: reopening flushed file at 1731220328957 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a1bbf22: reopening flushed file at 1731220328963 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@735d1469: reopening flushed file at 1731220328969 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@372f9ac1: reopening flushed file at 1731220328976 (+7 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=56, compaction requested=false at 1731220328983 (+7 ms)Writing region close event to WAL at 1731220328986 (+3 ms)Closed at 1731220328986 2024-11-10T06:32:08,987 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:08,987 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:08,987 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:08,987 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:08,987 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:08,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33329 is added to blk_1073741847_1031 (size=757) 2024-11-10T06:32:08,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42845 is added to blk_1073741847_1031 (size=757) 2024-11-10T06:32:09,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:09,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:09,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:09,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:09,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:09,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:09,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:09,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:09,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:09,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:09,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:09,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:09,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:09,982 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:09,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:09,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:10,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:10,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:10,489 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T06:32:10,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:10,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:10,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:10,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:10,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:10,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:10,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:10,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:10,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:10,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:10,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:10,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:10,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:10,511 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:11,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:11,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:11,822 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-10T06:32:12,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:12,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:12,839 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/WALs/4999977c7e1b,35461,1731220298644/4999977c7e1b%2C35461%2C1731220298644.1731220299192 after 4001ms 2024-11-10T06:32:12,840 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/WALs/4999977c7e1b,35461,1731220298644/4999977c7e1b%2C35461%2C1731220298644.1731220299192 to hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/oldWALs/4999977c7e1b%2C35461%2C1731220298644.1731220299192 2024-11-10T06:32:12,843 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/MasterData/oldWALs/4999977c7e1b%2C35461%2C1731220298644.1731220299192 to hdfs://localhost:44261/user/jenkins/test-data/9b3e5502-0501-5f82-436a-ee3b01059815/oldWALs/4999977c7e1b%2C35461%2C1731220298644.1731220299192$masterlocalwal$ 2024-11-10T06:32:12,843 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T06:32:12,843 INFO [M:0;4999977c7e1b:35461 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-10T06:32:12,843 INFO [M:0;4999977c7e1b:35461 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35461 2024-11-10T06:32:12,844 INFO [M:0;4999977c7e1b:35461 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T06:32:12,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:32:12,947 INFO [M:0;4999977c7e1b:35461 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T06:32:12,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35461-0x10190e0d0ed0000, quorum=127.0.0.1:55423, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:32:12,949 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@72532400{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:32:12,950 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b0a5107{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:32:12,950 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:32:12,950 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57d815ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:32:12,950 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1eff1159{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir/,STOPPED} 2024-11-10T06:32:12,951 WARN [BP-794254838-172.17.0.2-1731220297963 heartbeating to localhost/127.0.0.1:44261 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:32:12,951 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T06:32:12,951 WARN [BP-794254838-172.17.0.2-1731220297963 heartbeating to localhost/127.0.0.1:44261 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-794254838-172.17.0.2-1731220297963 (Datanode Uuid 3b149b59-c879-4029-a86f-ab369f901f70) service to localhost/127.0.0.1:44261 2024-11-10T06:32:12,951 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:32:12,952 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data3/current/BP-794254838-172.17.0.2-1731220297963 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:32:12,952 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data4/current/BP-794254838-172.17.0.2-1731220297963 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:32:12,953 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:32:12,954 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d222848{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:32:12,955 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@473433d5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:32:12,955 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:32:12,955 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68f73adf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:32:12,955 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18bcfccc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir/,STOPPED} 2024-11-10T06:32:12,956 WARN [BP-794254838-172.17.0.2-1731220297963 heartbeating to localhost/127.0.0.1:44261 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:32:12,956 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T06:32:12,956 WARN [BP-794254838-172.17.0.2-1731220297963 heartbeating to localhost/127.0.0.1:44261 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-794254838-172.17.0.2-1731220297963 (Datanode Uuid c04487f6-9a8d-4e08-a52b-3c677128c1e8) service to localhost/127.0.0.1:44261 2024-11-10T06:32:12,956 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:32:12,957 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data1/current/BP-794254838-172.17.0.2-1731220297963 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:32:12,957 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/cluster_b8c61b44-28e1-05fd-cd2c-adcbaca76d13/data/data2/current/BP-794254838-172.17.0.2-1731220297963 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:32:12,957 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:32:12,962 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@278324d2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T06:32:12,963 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2b3cd21a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:32:12,963 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:32:12,963 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49dea66d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:32:12,963 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70fc3167{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir/,STOPPED} 2024-11-10T06:32:12,969 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T06:32:12,987 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T06:32:12,999 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 153) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:44261 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44261 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44261 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:44261 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:44261 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:44261 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:44261 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44261 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=455 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=92 (was 3) - SystemLoadAverage LEAK?
-, ProcessCount=11 (was 11), AvailableMemoryMB=7034 (was 7191) 2024-11-10T06:32:13,006 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=92, ProcessCount=11, AvailableMemoryMB=7034 2024-11-10T06:32:13,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T06:32:13,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.log.dir so I do NOT create it in target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713 2024-11-10T06:32:13,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f37f590-6c90-9994-ab43-46d5033b5c0f/hadoop.tmp.dir so I do NOT create it in target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713 2024-11-10T06:32:13,006 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/cluster_8b4a91a1-e44b-f735-838e-42cef32803bf, deleteOnExit=true 2024-11-10T06:32:13,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-10T06:32:13,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/test.cache.data in system properties and HBase conf 2024-11-10T06:32:13,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T06:32:13,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/hadoop.log.dir in system properties and HBase conf 2024-11-10T06:32:13,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T06:32:13,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T06:32:13,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T06:32:13,007 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-10T06:32:13,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T06:32:13,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T06:32:13,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T06:32:13,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T06:32:13,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T06:32:13,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T06:32:13,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T06:32:13,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T06:32:13,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T06:32:13,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/nfs.dump.dir in system properties and HBase conf 2024-11-10T06:32:13,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/java.io.tmpdir in system properties and HBase conf 2024-11-10T06:32:13,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T06:32:13,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T06:32:13,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T06:32:13,021 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T06:32:13,086 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:32:13,091 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:32:13,092 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:32:13,092 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:32:13,092 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T06:32:13,093 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:32:13,096 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30ab2a92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:32:13,096 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7721c444{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:32:13,210 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@fa7e208{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/java.io.tmpdir/jetty-localhost-33849-hadoop-hdfs-3_4_1-tests_jar-_-any-8742092710768459000/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T06:32:13,210 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@db2e6f4{HTTP/1.1, (http/1.1)}{localhost:33849} 2024-11-10T06:32:13,210 INFO [Time-limited test {}] server.Server(415): Started @182911ms 2024-11-10T06:32:13,223 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T06:32:13,275 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:32:13,278 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:32:13,280 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:32:13,280 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:32:13,280 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T06:32:13,281 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@432cb77c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:32:13,281 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1357824d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:32:13,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@44ad4d72{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/java.io.tmpdir/jetty-localhost-42569-hadoop-hdfs-3_4_1-tests_jar-_-any-231669113274838104/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:32:13,396 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3020717b{HTTP/1.1, (http/1.1)}{localhost:42569} 2024-11-10T06:32:13,396 INFO [Time-limited test {}] server.Server(415): Started @183097ms 2024-11-10T06:32:13,397 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T06:32:13,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:13,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:13,426 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:32:13,428 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:32:13,429 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:32:13,429 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:32:13,429 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T06:32:13,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78346695{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:32:13,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78677773{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:32:13,483 WARN [Thread-1634 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/cluster_8b4a91a1-e44b-f735-838e-42cef32803bf/data/data2/current/BP-763120891-172.17.0.2-1731220333038/current, will proceed with Du for space computation calculation, 2024-11-10T06:32:13,483 WARN [Thread-1633 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/cluster_8b4a91a1-e44b-f735-838e-42cef32803bf/data/data1/current/BP-763120891-172.17.0.2-1731220333038/current, will proceed with Du for space computation calculation, 2024-11-10T06:32:13,500 WARN [Thread-1612 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T06:32:13,502 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x23a5a0929beed692 with lease ID 0x3e663ca3cf8e96b5: Processing first storage report for DS-01b1fb0a-9aab-45f4-9f7d-b84e716f92bd from datanode DatanodeRegistration(127.0.0.1:44267, datanodeUuid=6ed51e31-d9f5-477a-b4b4-f1c0f6ae70f5, infoPort=39235, infoSecurePort=0, ipcPort=43079, storageInfo=lv=-57;cid=testClusterID;nsid=1669176517;c=1731220333038) 2024-11-10T06:32:13,503 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x23a5a0929beed692 with lease ID 0x3e663ca3cf8e96b5: from storage DS-01b1fb0a-9aab-45f4-9f7d-b84e716f92bd node DatanodeRegistration(127.0.0.1:44267, datanodeUuid=6ed51e31-d9f5-477a-b4b4-f1c0f6ae70f5, infoPort=39235, infoSecurePort=0, ipcPort=43079, storageInfo=lv=-57;cid=testClusterID;nsid=1669176517;c=1731220333038), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:32:13,503 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x23a5a0929beed692 with lease ID 0x3e663ca3cf8e96b5: Processing first storage report for DS-67060d6c-9fe4-4620-a122-84f853fdf527 from datanode DatanodeRegistration(127.0.0.1:44267, datanodeUuid=6ed51e31-d9f5-477a-b4b4-f1c0f6ae70f5, infoPort=39235, infoSecurePort=0, ipcPort=43079, storageInfo=lv=-57;cid=testClusterID;nsid=1669176517;c=1731220333038) 2024-11-10T06:32:13,503 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x23a5a0929beed692 with lease ID 0x3e663ca3cf8e96b5: from storage DS-67060d6c-9fe4-4620-a122-84f853fdf527 node DatanodeRegistration(127.0.0.1:44267, datanodeUuid=6ed51e31-d9f5-477a-b4b4-f1c0f6ae70f5, infoPort=39235, infoSecurePort=0, ipcPort=43079, storageInfo=lv=-57;cid=testClusterID;nsid=1669176517;c=1731220333038), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:32:13,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@38bb5d1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/java.io.tmpdir/jetty-localhost-44689-hadoop-hdfs-3_4_1-tests_jar-_-any-18075882720623169765/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:32:13,547 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@26789c81{HTTP/1.1, (http/1.1)}{localhost:44689} 2024-11-10T06:32:13,547 INFO [Time-limited test {}] server.Server(415): Started @183247ms 2024-11-10T06:32:13,548 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
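The DirectoryScanner warning a few lines above ("dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1") reflects a simple sanity rule: a throttle expressed as milliseconds of scan work per second of wall clock cannot usefully exceed 1000, so out-of-range values fall back to the disabled default. The small Java sketch below illustrates that kind of clamp; the property name is taken from the log, but the code is an illustration only, not Hadoop's actual implementation.

    import java.util.Properties;

    /** Illustrative only: clamp a "ms of work per second" throttle the way the
     *  DataNode WARN line above describes. Not Hadoop's real code. */
    public class ThrottleCheck {
        // Property name copied from the log; -1 means "throttling disabled".
        static final String KEY = "dfs.datanode.directoryscan.throttle.limit.ms.per.sec";
        static final int DISABLED = -1;

        static int effectiveThrottle(Properties conf) {
            int configured = Integer.parseInt(conf.getProperty(KEY, String.valueOf(DISABLED)));
            // More than 1000 ms of work per 1000 ms of wall clock (or a non-positive
            // value) is meaningless, so fall back to the disabled default.
            if (configured > 1000 || configured <= 0) {
                return DISABLED;
            }
            return configured;
        }

        public static void main(String[] args) {
            Properties conf = new Properties();
            conf.setProperty(KEY, "1500");               // above 1000, as in the WARN above
            System.out.println(effectiveThrottle(conf)); // prints -1
        }
    }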
2024-11-10T06:32:13,641 WARN [Thread-1659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/cluster_8b4a91a1-e44b-f735-838e-42cef32803bf/data/data3/current/BP-763120891-172.17.0.2-1731220333038/current, will proceed with Du for space computation calculation, 2024-11-10T06:32:13,641 WARN [Thread-1660 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/cluster_8b4a91a1-e44b-f735-838e-42cef32803bf/data/data4/current/BP-763120891-172.17.0.2-1731220333038/current, will proceed with Du for space computation calculation, 2024-11-10T06:32:13,658 WARN [Thread-1648 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T06:32:13,660 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3e81909277c9ada5 with lease ID 0x3e663ca3cf8e96b6: Processing first storage report for DS-1e785015-e836-446e-98fc-5066a4514d57 from datanode DatanodeRegistration(127.0.0.1:41021, datanodeUuid=58f00131-e7cb-4228-a7c8-956d7c65e8a4, infoPort=40497, infoSecurePort=0, ipcPort=45787, storageInfo=lv=-57;cid=testClusterID;nsid=1669176517;c=1731220333038) 2024-11-10T06:32:13,660 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3e81909277c9ada5 with lease ID 0x3e663ca3cf8e96b6: from storage DS-1e785015-e836-446e-98fc-5066a4514d57 node DatanodeRegistration(127.0.0.1:41021, datanodeUuid=58f00131-e7cb-4228-a7c8-956d7c65e8a4, infoPort=40497, infoSecurePort=0, ipcPort=45787, storageInfo=lv=-57;cid=testClusterID;nsid=1669176517;c=1731220333038), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:32:13,660 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3e81909277c9ada5 with lease ID 0x3e663ca3cf8e96b6: Processing first storage report for DS-ef2e39ad-23b9-4151-97fa-b5aaefab098d from datanode DatanodeRegistration(127.0.0.1:41021, datanodeUuid=58f00131-e7cb-4228-a7c8-956d7c65e8a4, infoPort=40497, infoSecurePort=0, ipcPort=45787, storageInfo=lv=-57;cid=testClusterID;nsid=1669176517;c=1731220333038) 2024-11-10T06:32:13,660 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3e81909277c9ada5 with lease ID 0x3e663ca3cf8e96b6: from storage DS-ef2e39ad-23b9-4151-97fa-b5aaefab098d node DatanodeRegistration(127.0.0.1:41021, datanodeUuid=58f00131-e7cb-4228-a7c8-956d7c65e8a4, infoPort=40497, infoSecurePort=0, ipcPort=45787, storageInfo=lv=-57;cid=testClusterID;nsid=1669176517;c=1731220333038), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:32:13,670 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713 2024-11-10T06:32:13,672 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/cluster_8b4a91a1-e44b-f735-838e-42cef32803bf/zookeeper_0, clientPort=59608, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/cluster_8b4a91a1-e44b-f735-838e-42cef32803bf/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/cluster_8b4a91a1-e44b-f735-838e-42cef32803bf/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T06:32:13,673 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59608 2024-11-10T06:32:13,674 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:32:13,675 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:32:13,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741825_1001 (size=7) 2024-11-10T06:32:13,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741825_1001 (size=7) 2024-11-10T06:32:13,684 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014 with version=8 2024-11-10T06:32:13,685 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/hbase-staging 2024-11-10T06:32:13,687 INFO [Time-limited test {}] client.ConnectionUtils(128): master/4999977c7e1b:0 server-side Connection retries=45 2024-11-10T06:32:13,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:32:13,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T06:32:13,687 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T06:32:13,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:32:13,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T06:32:13,687 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T06:32:13,687 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T06:32:13,688 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45029 2024-11-10T06:32:13,689 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45029 connecting to ZooKeeper ensemble=127.0.0.1:59608 2024-11-10T06:32:13,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:450290x0, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T06:32:13,695 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45029-0x10190e159d00000 connected 2024-11-10T06:32:13,708 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:32:13,710 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:32:13,712 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:32:13,712 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014, hbase.cluster.distributed=false 2024-11-10T06:32:13,713 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T06:32:13,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45029 2024-11-10T06:32:13,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45029 2024-11-10T06:32:13,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45029 2024-11-10T06:32:13,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45029 2024-11-10T06:32:13,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45029 2024-11-10T06:32:13,730 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4999977c7e1b:0 server-side Connection retries=45 2024-11-10T06:32:13,730 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:32:13,730 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T06:32:13,730 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T06:32:13,730 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:32:13,730 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T06:32:13,730 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T06:32:13,730 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T06:32:13,731 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36725 2024-11-10T06:32:13,732 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36725 connecting to ZooKeeper ensemble=127.0.0.1:59608 2024-11-10T06:32:13,733 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:32:13,734 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:32:13,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:367250x0, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T06:32:13,739 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36725-0x10190e159d00001 connected 2024-11-10T06:32:13,739 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:367250x0, quorum=127.0.0.1:59608, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:32:13,739 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T06:32:13,740 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T06:32:13,740 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T06:32:13,741 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T06:32:13,744 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36725 2024-11-10T06:32:13,744 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36725 2024-11-10T06:32:13,744 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36725 2024-11-10T06:32:13,745 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36725 2024-11-10T06:32:13,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36725 2024-11-10T06:32:13,758 
DEBUG [M:0;4999977c7e1b:45029 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;4999977c7e1b:45029 2024-11-10T06:32:13,758 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/4999977c7e1b,45029,1731220333686 2024-11-10T06:32:13,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:32:13,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:32:13,761 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/4999977c7e1b,45029,1731220333686 2024-11-10T06:32:13,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T06:32:13,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:32:13,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:32:13,763 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T06:32:13,763 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/4999977c7e1b,45029,1731220333686 from backup master directory 2024-11-10T06:32:13,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/4999977c7e1b,45029,1731220333686 2024-11-10T06:32:13,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:32:13,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:32:13,765 WARN [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
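The master startup above is driven by ZooKeeper watch events on /hbase/master and /hbase/backup-masters: the new master registers itself, watches the relevant znodes, and removes its backup-master entry once it becomes active. As a rough illustration of the underlying building block (an ephemeral znode plus a watch), here is a minimal sketch using the plain ZooKeeper client; the connection string, path, and payload are placeholders, and this is not the ActiveMasterManager code itself.

    import java.nio.charset.StandardCharsets;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    /** Sketch: register an ephemeral znode and watch it -- the basic pattern behind
     *  the master/backup-master events logged above. */
    public class EphemeralRegistration {
        public static void main(String[] args) throws Exception {
            CountDownLatch connected = new CountDownLatch(1);
            // Placeholder ensemble address; the test uses its own MiniZooKeeperCluster port.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, (WatchedEvent e) -> {
                if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
                    connected.countDown();
                }
            });
            connected.await();

            // Ephemeral node: it disappears automatically if this process dies,
            // which is what allows another candidate to take over.
            String path = zk.create("/demo-master",
                    "host,port,startcode".getBytes(StandardCharsets.UTF_8),
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

            // Leave a watch so a NodeDeleted event is delivered if the registration goes away.
            zk.exists(path, event ->
                    System.out.println("event on " + event.getPath() + ": " + event.getType()));

            Thread.sleep(5_000);
            zk.close();
        }
    }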
2024-11-10T06:32:13,765 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=4999977c7e1b,45029,1731220333686 2024-11-10T06:32:13,770 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/hbase.id] with ID: 4de895bd-95c7-4652-a47e-c76d4615e311 2024-11-10T06:32:13,770 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/.tmp/hbase.id 2024-11-10T06:32:13,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741826_1002 (size=42) 2024-11-10T06:32:13,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741826_1002 (size=42) 2024-11-10T06:32:13,776 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/.tmp/hbase.id]:[hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/hbase.id] 2024-11-10T06:32:13,789 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:32:13,789 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T06:32:13,790 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
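The cluster ID handling above follows a write-to-temp-then-move pattern: the hbase.id file is written under a .tmp location first and only then renamed to its final name, so readers never observe a half-written file. A small local-filesystem sketch of the same idea (java.nio.file here, purely as an illustration; the test does the equivalent against HDFS paths) looks like this:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    /** Write-then-rename: the same idea as writing hbase.id under .tmp/ and moving it
     *  into place, shown on the local filesystem for illustration only. */
    public class WriteThenRename {
        static void publish(Path target, String content) throws IOException {
            Path tmp = target.resolveSibling(target.getFileName() + ".tmp");
            Files.writeString(tmp, content);          // temp copy may be seen half-written
            Files.move(tmp, target,                   // the rename is the "commit"
                    StandardCopyOption.ATOMIC_MOVE);
        }

        public static void main(String[] args) throws IOException {
            Path dir = Files.createTempDirectory("cluster-id-demo");
            publish(dir.resolve("hbase.id"), "4de895bd-95c7-4652-a47e-c76d4615e311");
            System.out.println(Files.readString(dir.resolve("hbase.id")));
        }
    }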
2024-11-10T06:32:13,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:32:13,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:32:13,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741827_1003 (size=196) 2024-11-10T06:32:13,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741827_1003 (size=196) 2024-11-10T06:32:13,802 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T06:32:13,803 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T06:32:13,803 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:32:13,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741828_1004 (size=1189) 2024-11-10T06:32:13,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741828_1004 (size=1189) 2024-11-10T06:32:13,811 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store 2024-11-10T06:32:13,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741829_1005 (size=34) 2024-11-10T06:32:13,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741829_1005 (size=34) 2024-11-10T06:32:13,818 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:32:13,818 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T06:32:13,818 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:32:13,818 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:32:13,818 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T06:32:13,818 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:32:13,818 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
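[editor note] The region.MasterRegion(370) and HRegion(7590) entries above spell out the 'master:store' descriptor: four families (info, proc, rs, state), each with its own versions, block size, bloom filter, and data-block encoding. As a rough illustration only, the sketch below shows how a descriptor with attributes like these is expressed through the public HBase client builder API; the master builds its local store region internally, so this is just the equivalent declaration for two of the families, under a hypothetical table name.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
        // 'info'-like family: 3 versions, in-memory, 8 KB blocks, ROWCOL bloom, ROW_INDEX_V1 encoding
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build())
        // 'proc'-like family: single version, ROW bloom, default block size and no encoding
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
  }
}
```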
2024-11-10T06:32:13,818 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731220333818Disabling compacts and flushes for region at 1731220333818Disabling writes for close at 1731220333818Writing region close event to WAL at 1731220333818Closed at 1731220333818 2024-11-10T06:32:13,819 WARN [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/.initializing 2024-11-10T06:32:13,819 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/WALs/4999977c7e1b,45029,1731220333686 2024-11-10T06:32:13,822 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C45029%2C1731220333686, suffix=, logDir=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/WALs/4999977c7e1b,45029,1731220333686, archiveDir=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/oldWALs, maxLogs=10 2024-11-10T06:32:13,823 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C45029%2C1731220333686.1731220333822 2024-11-10T06:32:13,827 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/WALs/4999977c7e1b,45029,1731220333686/4999977c7e1b%2C45029%2C1731220333686.1731220333822 2024-11-10T06:32:13,828 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40497:40497),(127.0.0.1/127.0.0.1:39235:39235)] 2024-11-10T06:32:13,829 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:32:13,829 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:32:13,829 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:32:13,829 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:32:13,830 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:32:13,831 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T06:32:13,831 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:32:13,832 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:32:13,832 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:32:13,833 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T06:32:13,833 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:32:13,833 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:32:13,833 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:32:13,835 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T06:32:13,835 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:32:13,835 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:32:13,835 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:32:13,836 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T06:32:13,836 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:32:13,837 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:32:13,837 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:32:13,837 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:32:13,838 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:32:13,839 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:32:13,839 DEBUG [master/4999977c7e1b:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:32:13,840 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T06:32:13,841 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:32:13,842 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:32:13,843 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721916, jitterRate=-0.08203703165054321}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T06:32:13,844 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731220333829Initializing all the Stores at 1731220333830 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220333830Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220333830Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220333830Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220333830Cleaning up temporary data from old regions at 1731220333839 (+9 ms)Region opened successfully at 1731220333843 (+4 ms) 2024-11-10T06:32:13,844 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T06:32:13,847 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c872c1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4999977c7e1b/172.17.0.2:0 2024-11-10T06:32:13,848 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T06:32:13,848 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T06:32:13,848 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T06:32:13,848 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T06:32:13,849 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-10T06:32:13,849 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-10T06:32:13,849 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T06:32:13,851 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T06:32:13,852 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T06:32:13,853 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T06:32:13,854 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T06:32:13,854 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T06:32:13,856 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T06:32:13,856 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T06:32:13,857 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T06:32:13,858 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T06:32:13,859 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T06:32:13,860 DEBUG 
[master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T06:32:13,862 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T06:32:13,863 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T06:32:13,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T06:32:13,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T06:32:13,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:32:13,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:32:13,866 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=4999977c7e1b,45029,1731220333686, sessionid=0x10190e159d00000, setting cluster-up flag (Was=false) 2024-11-10T06:32:13,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:32:13,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:32:13,874 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T06:32:13,875 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4999977c7e1b,45029,1731220333686 2024-11-10T06:32:13,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:32:13,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:32:13,883 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T06:32:13,884 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4999977c7e1b,45029,1731220333686 2024-11-10T06:32:13,885 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T06:32:13,887 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T06:32:13,887 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T06:32:13,887 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-10T06:32:13,887 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 4999977c7e1b,45029,1731220333686 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T06:32:13,888 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:32:13,888 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:32:13,888 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:32:13,888 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:32:13,888 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/4999977c7e1b:0, corePoolSize=10, maxPoolSize=10 2024-11-10T06:32:13,888 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:32:13,888 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/4999977c7e1b:0, corePoolSize=2, maxPoolSize=2 2024-11-10T06:32:13,889 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/4999977c7e1b:0, corePoolSize=1, 
maxPoolSize=1 2024-11-10T06:32:13,889 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731220363889 2024-11-10T06:32:13,889 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T06:32:13,889 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T06:32:13,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T06:32:13,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T06:32:13,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T06:32:13,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T06:32:13,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:13,890 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:32:13,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T06:32:13,890 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T06:32:13,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T06:32:13,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T06:32:13,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T06:32:13,891 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T06:32:13,891 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220333891,5,FailOnTimeoutGroup] 2024-11-10T06:32:13,891 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220333891,5,FailOnTimeoutGroup] 2024-11-10T06:32:13,891 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:13,891 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T06:32:13,891 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:32:13,891 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:13,891 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:13,891 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T06:32:13,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741831_1007 (size=1321) 2024-11-10T06:32:13,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741831_1007 (size=1321) 2024-11-10T06:32:13,899 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T06:32:13,899 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014 2024-11-10T06:32:13,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741832_1008 (size=32) 2024-11-10T06:32:13,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741832_1008 (size=32) 2024-11-10T06:32:13,905 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:32:13,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T06:32:13,908 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T06:32:13,908 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:32:13,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:32:13,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T06:32:13,909 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T06:32:13,909 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:32:13,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:32:13,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T06:32:13,911 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T06:32:13,911 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:32:13,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:32:13,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T06:32:13,912 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T06:32:13,912 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:32:13,913 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:32:13,913 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T06:32:13,913 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740 2024-11-10T06:32:13,914 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740 2024-11-10T06:32:13,915 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T06:32:13,915 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T06:32:13,915 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
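[editor note] The two regionserver.FlushLargeStoresPolicy(65) entries (06:32:13,840 for master:store and 06:32:13,915 for hbase:meta) record the fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset: the per-family flush lower bound becomes the region's memstore flush size divided by the number of column families. The arithmetic check below reproduces both logged values (32.0 M and 16.0 M, each region having four families); the 128 MB figure matches the flushSize logged earlier, while the 64 MB figure for meta is inferred from the logged result rather than read out of the test configuration.

```java
public class FlushLowerBoundSketch {
  // Fallback from the log: lower bound = region memstore flush size / number of families.
  static long lowerBound(long memstoreFlushSize, int numFamilies) {
    return memstoreFlushSize / numFamilies;
  }

  public static void main(String[] args) {
    // master:store -> flushSize=134217728 (128 MB, logged above) over 4 families = 32 MB.
    System.out.println(lowerBound(134_217_728L, 4));      // 33554432, matching flushSizeLowerBound
    // hbase:meta -> an assumed 64 MB flush size over 4 families = 16 MB.
    System.out.println(lowerBound(64L * 1024 * 1024, 4)); // 16777216
  }
}
```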
2024-11-10T06:32:13,916 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T06:32:13,918 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:32:13,918 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=810267, jitterRate=0.030307933688163757}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T06:32:13,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731220333906Initializing all the Stores at 1731220333906Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220333906Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220333906Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220333906Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220333906Cleaning up temporary data from old regions at 1731220333915 (+9 ms)Region opened successfully at 1731220333919 (+4 ms) 2024-11-10T06:32:13,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T06:32:13,919 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T06:32:13,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T06:32:13,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T06:32:13,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T06:32:13,919 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T06:32:13,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731220333919Disabling compacts and flushes for region at 1731220333919Disabling writes for close at 1731220333919Writing region close event 
to WAL at 1731220333919Closed at 1731220333919 2024-11-10T06:32:13,920 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:32:13,921 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T06:32:13,921 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T06:32:13,922 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T06:32:13,923 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T06:32:13,948 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(746): ClusterId : 4de895bd-95c7-4652-a47e-c76d4615e311 2024-11-10T06:32:13,948 DEBUG [RS:0;4999977c7e1b:36725 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T06:32:13,950 DEBUG [RS:0;4999977c7e1b:36725 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T06:32:13,950 DEBUG [RS:0;4999977c7e1b:36725 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T06:32:13,953 DEBUG [RS:0;4999977c7e1b:36725 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T06:32:13,953 DEBUG [RS:0;4999977c7e1b:36725 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fda83ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4999977c7e1b/172.17.0.2:0 2024-11-10T06:32:13,965 DEBUG [RS:0;4999977c7e1b:36725 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;4999977c7e1b:36725 2024-11-10T06:32:13,965 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T06:32:13,965 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T06:32:13,965 DEBUG [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-10T06:32:13,966 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(2659): reportForDuty to master=4999977c7e1b,45029,1731220333686 with port=36725, startcode=1731220333730 2024-11-10T06:32:13,966 DEBUG [RS:0;4999977c7e1b:36725 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T06:32:13,968 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34991, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T06:32:13,969 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45029 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4999977c7e1b,36725,1731220333730 2024-11-10T06:32:13,969 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45029 {}] master.ServerManager(517): Registering regionserver=4999977c7e1b,36725,1731220333730 2024-11-10T06:32:13,970 DEBUG [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014 2024-11-10T06:32:13,970 DEBUG [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40549 2024-11-10T06:32:13,970 DEBUG [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T06:32:13,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T06:32:13,972 DEBUG [RS:0;4999977c7e1b:36725 {}] zookeeper.ZKUtil(111): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4999977c7e1b,36725,1731220333730 2024-11-10T06:32:13,972 WARN [RS:0;4999977c7e1b:36725 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T06:32:13,972 INFO [RS:0;4999977c7e1b:36725 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:32:13,972 DEBUG [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/WALs/4999977c7e1b,36725,1731220333730 2024-11-10T06:32:13,973 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4999977c7e1b,36725,1731220333730] 2024-11-10T06:32:13,976 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T06:32:13,978 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T06:32:13,978 INFO [RS:0;4999977c7e1b:36725 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T06:32:13,978 INFO [RS:0;4999977c7e1b:36725 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
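[editor note] The entries above capture the two halves of region server registration: the reportForDuty RPC to the master, and an ephemeral znode under /hbase/rs that the master's RegionServerTracker picks up via a NodeChildrenChanged watch. The snippet below sketches only the ZooKeeper side of that pattern with the plain ZooKeeper client; the quorum address, paths, and server name are placeholders, it assumes /hbase/rs already exists, and HBase's own ZKWatcher/ZKUtil wrappers add retry and ACL handling on top of this.

```java
import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class RsRegistrationSketch {
  public static void main(String[] args) throws Exception {
    // Watcher that reacts when the set of children under /hbase/rs changes,
    // mirroring the NodeChildrenChanged events in the log.
    Watcher watcher = event -> {
      if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
        System.out.println("children changed under " + event.getPath());
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher); // placeholder quorum

    // "Master" side: list current servers and leave a children watch behind.
    List<String> servers = zk.getChildren("/hbase/rs", true);
    System.out.println("currently registered: " + servers);

    // "Region server" side: create an ephemeral node named after the server.
    // It disappears automatically when the session dies, which is what lets the
    // master notice a crashed region server.
    zk.create("/hbase/rs/host.example,36725,1731220333730",
        new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    Thread.sleep(1_000); // give the watch a moment to fire in this toy example
    zk.close();
  }
}
```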
2024-11-10T06:32:13,979 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T06:32:13,979 INFO [RS:0;4999977c7e1b:36725 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T06:32:13,979 INFO [RS:0;4999977c7e1b:36725 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:13,980 DEBUG [RS:0;4999977c7e1b:36725 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:32:13,980 DEBUG [RS:0;4999977c7e1b:36725 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:32:13,980 DEBUG [RS:0;4999977c7e1b:36725 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:32:13,980 DEBUG [RS:0;4999977c7e1b:36725 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:32:13,980 DEBUG [RS:0;4999977c7e1b:36725 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:32:13,980 DEBUG [RS:0;4999977c7e1b:36725 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4999977c7e1b:0, corePoolSize=2, maxPoolSize=2 2024-11-10T06:32:13,980 DEBUG [RS:0;4999977c7e1b:36725 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:32:13,980 DEBUG [RS:0;4999977c7e1b:36725 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:32:13,980 DEBUG [RS:0;4999977c7e1b:36725 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:32:13,980 DEBUG [RS:0;4999977c7e1b:36725 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:32:13,980 DEBUG [RS:0;4999977c7e1b:36725 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:32:13,980 DEBUG [RS:0;4999977c7e1b:36725 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:32:13,980 DEBUG [RS:0;4999977c7e1b:36725 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:32:13,980 DEBUG [RS:0;4999977c7e1b:36725 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:32:13,984 INFO [RS:0;4999977c7e1b:36725 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
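[editor note] Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" entry above registers a recurring background task with a fixed period. In plain java.util.concurrent terms the idea is the fixed-period scheduling shown below; HBase's ChoreService adds its own thread pool sizing and missed-start accounting, so this is only the general pattern with a made-up task, not the ChoreService implementation.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();

    // A stand-in for something like CompactionChecker: runs every 1000 ms
    // until the pool is shut down.
    pool.scheduleAtFixedRate(
        () -> System.out.println("chore tick at " + System.currentTimeMillis()),
        0, 1000, TimeUnit.MILLISECONDS);

    Thread.sleep(3_500);  // let a few ticks fire, then stop
    pool.shutdown();
  }
}
```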
2024-11-10T06:32:13,984 INFO [RS:0;4999977c7e1b:36725 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:13,984 INFO [RS:0;4999977c7e1b:36725 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:13,984 INFO [RS:0;4999977c7e1b:36725 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:13,984 INFO [RS:0;4999977c7e1b:36725 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:13,984 INFO [RS:0;4999977c7e1b:36725 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,36725,1731220333730-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T06:32:13,999 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T06:32:13,999 INFO [RS:0;4999977c7e1b:36725 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,36725,1731220333730-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:13,999 INFO [RS:0;4999977c7e1b:36725 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:13,999 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.Replication(171): 4999977c7e1b,36725,1731220333730 started 2024-11-10T06:32:14,014 INFO [RS:0;4999977c7e1b:36725 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:14,014 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(1482): Serving as 4999977c7e1b,36725,1731220333730, RpcServer on 4999977c7e1b/172.17.0.2:36725, sessionid=0x10190e159d00001 2024-11-10T06:32:14,014 DEBUG [RS:0;4999977c7e1b:36725 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T06:32:14,014 DEBUG [RS:0;4999977c7e1b:36725 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4999977c7e1b,36725,1731220333730 2024-11-10T06:32:14,014 DEBUG [RS:0;4999977c7e1b:36725 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,36725,1731220333730' 2024-11-10T06:32:14,014 DEBUG [RS:0;4999977c7e1b:36725 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T06:32:14,015 DEBUG [RS:0;4999977c7e1b:36725 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T06:32:14,015 DEBUG [RS:0;4999977c7e1b:36725 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T06:32:14,015 DEBUG [RS:0;4999977c7e1b:36725 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T06:32:14,015 DEBUG [RS:0;4999977c7e1b:36725 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4999977c7e1b,36725,1731220333730 2024-11-10T06:32:14,015 DEBUG [RS:0;4999977c7e1b:36725 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,36725,1731220333730' 2024-11-10T06:32:14,015 DEBUG [RS:0;4999977c7e1b:36725 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T06:32:14,015 DEBUG 
[RS:0;4999977c7e1b:36725 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T06:32:14,016 DEBUG [RS:0;4999977c7e1b:36725 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T06:32:14,016 INFO [RS:0;4999977c7e1b:36725 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T06:32:14,016 INFO [RS:0;4999977c7e1b:36725 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T06:32:14,073 WARN [4999977c7e1b:45029 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-10T06:32:14,118 INFO [RS:0;4999977c7e1b:36725 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C36725%2C1731220333730, suffix=, logDir=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/WALs/4999977c7e1b,36725,1731220333730, archiveDir=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/oldWALs, maxLogs=32 2024-11-10T06:32:14,118 INFO [RS:0;4999977c7e1b:36725 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C36725%2C1731220333730.1731220334118 2024-11-10T06:32:14,124 INFO [RS:0;4999977c7e1b:36725 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/WALs/4999977c7e1b,36725,1731220333730/4999977c7e1b%2C36725%2C1731220333730.1731220334118 2024-11-10T06:32:14,130 DEBUG [RS:0;4999977c7e1b:36725 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40497:40497),(127.0.0.1/127.0.0.1:39235:39235)] 2024-11-10T06:32:14,324 DEBUG [4999977c7e1b:45029 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-10T06:32:14,324 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=4999977c7e1b,36725,1731220333730 2024-11-10T06:32:14,326 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4999977c7e1b,36725,1731220333730, state=OPENING 2024-11-10T06:32:14,328 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T06:32:14,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:32:14,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:32:14,330 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:32:14,330 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T06:32:14,330 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:32:14,330 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=4999977c7e1b,36725,1731220333730}] 2024-11-10T06:32:14,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:14,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:14,482 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T06:32:14,484 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55823, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T06:32:14,488 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T06:32:14,488 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:32:14,490 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C36725%2C1731220333730.meta, suffix=.meta, logDir=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/WALs/4999977c7e1b,36725,1731220333730, archiveDir=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/oldWALs, maxLogs=32 2024-11-10T06:32:14,490 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C36725%2C1731220333730.meta.1731220334490.meta 2024-11-10T06:32:14,495 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/WALs/4999977c7e1b,36725,1731220333730/4999977c7e1b%2C36725%2C1731220333730.meta.1731220334490.meta 2024-11-10T06:32:14,496 DEBUG 
[RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40497:40497),(127.0.0.1/127.0.0.1:39235:39235)] 2024-11-10T06:32:14,497 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:32:14,497 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T06:32:14,497 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T06:32:14,497 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-10T06:32:14,497 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T06:32:14,497 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:32:14,498 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T06:32:14,498 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T06:32:14,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T06:32:14,500 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T06:32:14,500 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:32:14,500 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:32:14,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T06:32:14,501 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T06:32:14,501 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:32:14,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:32:14,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T06:32:14,502 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T06:32:14,502 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:32:14,503 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:32:14,503 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T06:32:14,503 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T06:32:14,503 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:32:14,504 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:32:14,504 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T06:32:14,505 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740 2024-11-10T06:32:14,505 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740 2024-11-10T06:32:14,507 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T06:32:14,507 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T06:32:14,507 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
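The StoreOpener entries above show each hbase:meta column family (info, ns, rep_barrier, table) coming up with ROW_INDEX_V1 block encoding, a DefaultMemStore, the default cache configuration, and the default compaction configuration. A hedged sketch of declaring a column family with comparable settings through the public client API; the values mirror the meta descriptors printed just below and nothing here is required to reproduce the log:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamilySketch {
      public static void main(String[] args) {
        // Roughly the 'info' family of hbase:meta as logged: ROW_INDEX_V1 encoding,
        // ROWCOL bloom filter, in-memory, 8 KB blocks, 3 versions, no compression.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setMaxVersions(3)
            .build();
        System.out.println(info);
      }
    }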
2024-11-10T06:32:14,508 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T06:32:14,509 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=806081, jitterRate=0.024985790252685547}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T06:32:14,509 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T06:32:14,510 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731220334498Writing region info on filesystem at 1731220334498Initializing all the Stores at 1731220334499 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220334499Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220334499Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220334499Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220334499Cleaning up temporary data from old regions at 1731220334507 (+8 ms)Running coprocessor post-open hooks at 1731220334509 (+2 ms)Region opened successfully at 1731220334510 (+1 ms) 2024-11-10T06:32:14,511 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731220334482 2024-11-10T06:32:14,513 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T06:32:14,513 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T06:32:14,514 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=4999977c7e1b,36725,1731220333730 2024-11-10T06:32:14,515 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4999977c7e1b,36725,1731220333730, state=OPEN 2024-11-10T06:32:14,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T06:32:14,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T06:32:14,520 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=4999977c7e1b,36725,1731220333730 2024-11-10T06:32:14,520 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:32:14,520 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:32:14,522 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T06:32:14,522 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=4999977c7e1b,36725,1731220333730 in 190 msec 2024-11-10T06:32:14,525 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T06:32:14,525 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 601 msec 2024-11-10T06:32:14,526 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:32:14,526 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T06:32:14,527 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T06:32:14,527 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4999977c7e1b,36725,1731220333730, seqNum=-1] 2024-11-10T06:32:14,527 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T06:32:14,529 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51631, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T06:32:14,534 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 647 msec 2024-11-10T06:32:14,534 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731220334534, completionTime=-1 2024-11-10T06:32:14,534 INFO 
[master/4999977c7e1b:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-10T06:32:14,534 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-10T06:32:14,536 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-10T06:32:14,536 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731220394536 2024-11-10T06:32:14,536 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731220454536 2024-11-10T06:32:14,536 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-10T06:32:14,536 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,45029,1731220333686-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:14,537 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,45029,1731220333686-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:14,537 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,45029,1731220333686-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:14,537 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-4999977c7e1b:45029, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:14,537 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:14,537 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:14,538 DEBUG [master/4999977c7e1b:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T06:32:14,540 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.775sec 2024-11-10T06:32:14,540 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T06:32:14,541 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T06:32:14,541 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T06:32:14,541 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
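At this point the master has seen its single regionserver, joined the cluster, scheduled its housekeeping chores (balancer, normalizer, catalog janitor, HBCK), and reported initialization complete in 0.775 s with quota support disabled. A short sketch of how a client can confirm that state, assuming the standard Admin/ClusterMetrics API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          // Expect the active master plus one live regionserver, matching
          // "Finished waiting on RegionServer count=1" above.
          System.out.println("active master: " + metrics.getMasterName());
          System.out.println("live regionservers: " + metrics.getLiveServerMetrics().size());
        }
      }
    }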
2024-11-10T06:32:14,541 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T06:32:14,541 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,45029,1731220333686-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T06:32:14,541 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,45029,1731220333686-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T06:32:14,543 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T06:32:14,543 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T06:32:14,543 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,45029,1731220333686-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:14,548 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a79e046, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:32:14,548 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 4999977c7e1b,45029,-1 for getting cluster id 2024-11-10T06:32:14,548 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T06:32:14,550 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4de895bd-95c7-4652-a47e-c76d4615e311' 2024-11-10T06:32:14,550 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T06:32:14,551 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4de895bd-95c7-4652-a47e-c76d4615e311" 2024-11-10T06:32:14,551 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ab0c5db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:32:14,551 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4999977c7e1b,45029,-1] 2024-11-10T06:32:14,551 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T06:32:14,551 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:32:14,552 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57164, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T06:32:14,553 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38a6c73c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:32:14,554 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T06:32:14,555 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4999977c7e1b,36725,1731220333730, seqNum=-1] 2024-11-10T06:32:14,555 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T06:32:14,556 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38102, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T06:32:14,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=4999977c7e1b,45029,1731220333686 2024-11-10T06:32:14,557 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:32:14,560 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-10T06:32:14,560 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T06:32:14,561 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 4999977c7e1b,45029,1731220333686 2024-11-10T06:32:14,561 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@58b6f617 2024-11-10T06:32:14,561 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T06:32:14,562 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57166, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T06:32:14,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-10T06:32:14,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
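The two TableDescriptorChecker warnings above fire because the test deliberately shrinks MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192), either on the descriptor or via the corresponding hbase.hregion.* settings, so that flushes and splits happen quickly; with descriptor sanity checks fully enabled HBase would normally refuse such values. A hedged sketch of a descriptor that would trigger the same warnings; "hbase.table.sanity.checks" is my assumption for the relaxation switch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class TinyThresholdTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed switch: relax descriptor sanity checks so tiny sizes only warn.
        conf.setBoolean("hbase.table.sanity.checks", false);
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
            .setMaxFileSize(786432)       // triggers the MAX_FILESIZE warning
            .setMemStoreFlushSize(8192)   // triggers the MEMSTORE_FLUSHSIZE warning
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .build();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          admin.createTable(td);
        }
      }
    }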
2024-11-10T06:32:14,563 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T06:32:14,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T06:32:14,566 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T06:32:14,566 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:32:14,566 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-10T06:32:14,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T06:32:14,568 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-10T06:32:14,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741835_1011 (size=405) 2024-11-10T06:32:14,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741835_1011 (size=405) 2024-11-10T06:32:14,576 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c1b8f3fdb26c51dd07111269bf7b615a, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014 2024-11-10T06:32:14,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741836_1012 (size=88) 2024-11-10T06:32:14,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44267 is added to blk_1073741836_1012 (size=88) 2024-11-10T06:32:14,583 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:32:14,583 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing c1b8f3fdb26c51dd07111269bf7b615a, disabling compactions & flushes 2024-11-10T06:32:14,583 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 2024-11-10T06:32:14,583 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 2024-11-10T06:32:14,583 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. after waiting 0 ms 2024-11-10T06:32:14,583 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 2024-11-10T06:32:14,583 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 2024-11-10T06:32:14,583 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for c1b8f3fdb26c51dd07111269bf7b615a: Waiting for close lock at 1731220334583Disabling compacts and flushes for region at 1731220334583Disabling writes for close at 1731220334583Writing region close event to WAL at 1731220334583Closed at 1731220334583 2024-11-10T06:32:14,585 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T06:32:14,585 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731220334585"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731220334585"}]},"ts":"1731220334585"} 2024-11-10T06:32:14,587 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-10T06:32:14,588 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T06:32:14,589 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731220334589"}]},"ts":"1731220334589"} 2024-11-10T06:32:14,591 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-10T06:32:14,591 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c1b8f3fdb26c51dd07111269bf7b615a, ASSIGN}] 2024-11-10T06:32:14,592 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c1b8f3fdb26c51dd07111269bf7b615a, ASSIGN 2024-11-10T06:32:14,593 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c1b8f3fdb26c51dd07111269bf7b615a, ASSIGN; state=OFFLINE, location=4999977c7e1b,36725,1731220333730; forceNewPlan=false, retain=false 2024-11-10T06:32:14,744 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c1b8f3fdb26c51dd07111269bf7b615a, regionState=OPENING, regionLocation=4999977c7e1b,36725,1731220333730 2024-11-10T06:32:14,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c1b8f3fdb26c51dd07111269bf7b615a, ASSIGN because future has completed 2024-11-10T06:32:14,747 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c1b8f3fdb26c51dd07111269bf7b615a, server=4999977c7e1b,36725,1731220333730}] 2024-11-10T06:32:14,904 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 
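With the assignment subprocedure dispatched, the regionserver opens TestLogRolling-testCompactionRecordDoesntBlockRolling in the entries below and the table becomes usable. A minimal client-side sketch of the kind of traffic such a test then drives (write, flush, compaction request), assuming the standard Table/Admin API; the row and value names are illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DriveCompactionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(name);
             Admin admin = connection.getAdmin()) {
          // Write a row into the 'info' family created above, then request a flush
          // and a major compaction so compaction markers land in the WAL.
          Put put = new Put(Bytes.toBytes("row-0"));
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
          table.put(put);
          admin.flush(name);        // request a flush of the table
          admin.majorCompact(name); // request a major compaction (asynchronous)
        }
      }
    }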
2024-11-10T06:32:14,904 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c1b8f3fdb26c51dd07111269bf7b615a, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a.', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:32:14,904 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling c1b8f3fdb26c51dd07111269bf7b615a 2024-11-10T06:32:14,904 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:32:14,904 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c1b8f3fdb26c51dd07111269bf7b615a 2024-11-10T06:32:14,904 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c1b8f3fdb26c51dd07111269bf7b615a 2024-11-10T06:32:14,906 INFO [StoreOpener-c1b8f3fdb26c51dd07111269bf7b615a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c1b8f3fdb26c51dd07111269bf7b615a 2024-11-10T06:32:14,907 INFO [StoreOpener-c1b8f3fdb26c51dd07111269bf7b615a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c1b8f3fdb26c51dd07111269bf7b615a columnFamilyName info 2024-11-10T06:32:14,907 DEBUG [StoreOpener-c1b8f3fdb26c51dd07111269bf7b615a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:32:14,908 INFO [StoreOpener-c1b8f3fdb26c51dd07111269bf7b615a-1 {}] regionserver.HStore(327): Store=c1b8f3fdb26c51dd07111269bf7b615a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:32:14,908 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c1b8f3fdb26c51dd07111269bf7b615a 2024-11-10T06:32:14,908 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a 2024-11-10T06:32:14,909 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a 2024-11-10T06:32:14,909 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c1b8f3fdb26c51dd07111269bf7b615a 2024-11-10T06:32:14,909 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c1b8f3fdb26c51dd07111269bf7b615a 2024-11-10T06:32:14,911 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c1b8f3fdb26c51dd07111269bf7b615a 2024-11-10T06:32:14,913 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:32:14,913 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c1b8f3fdb26c51dd07111269bf7b615a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716893, jitterRate=-0.08842384815216064}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T06:32:14,913 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c1b8f3fdb26c51dd07111269bf7b615a 2024-11-10T06:32:14,914 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c1b8f3fdb26c51dd07111269bf7b615a: Running coprocessor pre-open hook at 1731220334905Writing region info on filesystem at 1731220334905Initializing all the Stores at 1731220334905Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220334905Cleaning up temporary data from old regions at 1731220334909 (+4 ms)Running coprocessor post-open hooks at 1731220334913 (+4 ms)Region opened successfully at 1731220334914 (+1 ms) 2024-11-10T06:32:14,915 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a., pid=6, masterSystemTime=1731220334900 2024-11-10T06:32:14,917 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 2024-11-10T06:32:14,917 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 2024-11-10T06:32:14,918 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c1b8f3fdb26c51dd07111269bf7b615a, regionState=OPEN, openSeqNum=2, regionLocation=4999977c7e1b,36725,1731220333730 2024-11-10T06:32:14,921 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c1b8f3fdb26c51dd07111269bf7b615a, server=4999977c7e1b,36725,1731220333730 because future has completed 2024-11-10T06:32:14,925 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T06:32:14,926 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c1b8f3fdb26c51dd07111269bf7b615a, server=4999977c7e1b,36725,1731220333730 in 175 msec 2024-11-10T06:32:14,928 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T06:32:14,928 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c1b8f3fdb26c51dd07111269bf7b615a, ASSIGN in 335 msec 2024-11-10T06:32:14,929 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T06:32:14,929 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731220334929"}]},"ts":"1731220334929"} 2024-11-10T06:32:14,931 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-10T06:32:14,932 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T06:32:14,934 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 369 msec 2024-11-10T06:32:15,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:15,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:15,498 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-10T06:32:15,498 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-10T06:32:15,499 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T06:32:15,499 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-10T06:32:15,499 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T06:32:15,499 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-10T06:32:16,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:16,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:17,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:17,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:18,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:18,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:19,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:19,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:19,999 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T06:32:20,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,001 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,001 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,001 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,001 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,017 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,017 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,017 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,018 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,018 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,018 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,024 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:32:20,028 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-10T06:32:20,028 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-10T06:32:20,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:20,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:21,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:21,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:22,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:22,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:23,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:23,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:24,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:24,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:24,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T06:32:24,596 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-10T06:32:24,596 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-10T06:32:24,599 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T06:32:24,599 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 2024-11-10T06:32:24,602 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a., hostname=4999977c7e1b,36725,1731220333730, seqNum=2] 2024-11-10T06:32:24,609 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T06:32:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T06:32:24,615 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-10T06:32:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T06:32:24,616 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-10T06:32:24,618 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-10T06:32:24,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36725 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-10T06:32:24,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 
2024-11-10T06:32:24,778 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing c1b8f3fdb26c51dd07111269bf7b615a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-10T06:32:24,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/0c3eb5adb4bd4702be3b65551b90e06a is 1080, key is row0001/info:/1731220344603/Put/seqid=0
2024-11-10T06:32:24,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741837_1013 (size=6033)
2024-11-10T06:32:24,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741837_1013 (size=6033)
2024-11-10T06:32:24,801 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/0c3eb5adb4bd4702be3b65551b90e06a
2024-11-10T06:32:24,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/0c3eb5adb4bd4702be3b65551b90e06a as hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/0c3eb5adb4bd4702be3b65551b90e06a
2024-11-10T06:32:24,813 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/0c3eb5adb4bd4702be3b65551b90e06a, entries=1, sequenceid=5, filesize=5.9 K
2024-11-10T06:32:24,814 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c1b8f3fdb26c51dd07111269bf7b615a in 36ms, sequenceid=5, compaction requested=false
2024-11-10T06:32:24,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for c1b8f3fdb26c51dd07111269bf7b615a:
2024-11-10T06:32:24,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a.
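The flush above writes the memstore to a file under the region's .tmp directory and then commits it into the column-family directory, which is what the HRegionFileSystem "Committing ... as ..." and HStore "Added ..." records show. A minimal sketch of that write-to-temp-then-rename commit step using the Hadoop FileSystem API; the helper name and paths are illustrative and this is not the HRegionFileSystem implementation itself:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitStoreFileSketch {
  /** Move a freshly flushed file from the region's .tmp area into the store directory. */
  static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    // On HDFS, rename() within a single namespace is atomic, so readers either see
    // the store without the new file or with the complete, fully written HFile.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dst);
    }
    return dst;
  }
}
```

Writing into .tmp first keeps half-written HFiles out of the scanned store directory; only the cheap, atomic rename makes the data visible.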
2024-11-10T06:32:24,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-10T06:32:24,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-10T06:32:24,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-10T06:32:24,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 201 msec 2024-11-10T06:32:24,824 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 212 msec 2024-11-10T06:32:25,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:25,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:26,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:26,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:27,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:27,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:28,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:28,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:29,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:29,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:30,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:30,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:31,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:31,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:32,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:32,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:33,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:33,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:34,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:34,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-10T06:32:34,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-10T06:32:34,666 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-10T06:32:34,669 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-10T06:32:34,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-10T06:32:34,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-10T06:32:34,672 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-10T06:32:34,673 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-10T06:32:34,673 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-10T06:32:34,826 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36725 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-10T06:32:34,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a.
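The "Operation: FLUSH ... completed" record above comes from RawAsyncHBaseAdmin, the asynchronous client that backs the admin call and completes a future once MasterRpcServices reports the flush procedure done; the master then stores the next flush as pid=9 with sub-procedure pid=10. A minimal sketch of driving the same flush through the async API, assuming default configuration and omitting error handling:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      // AsyncAdmin.flush() returns a CompletableFuture that completes when the
      // corresponding master-side flush procedure finishes; get() waits for it here.
      conn.getAdmin().flush(table).get();
    }
  }
}
```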
2024-11-10T06:32:34,827 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing c1b8f3fdb26c51dd07111269bf7b615a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-10T06:32:34,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/631276aa490f420db12d607598f0e6bb is 1080, key is row0002/info:/1731220354667/Put/seqid=0
2024-11-10T06:32:34,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741838_1014 (size=6033)
2024-11-10T06:32:34,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741838_1014 (size=6033)
2024-11-10T06:32:34,839 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/631276aa490f420db12d607598f0e6bb
2024-11-10T06:32:34,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/631276aa490f420db12d607598f0e6bb as hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/631276aa490f420db12d607598f0e6bb
2024-11-10T06:32:34,850 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/631276aa490f420db12d607598f0e6bb, entries=1, sequenceid=9, filesize=5.9 K
2024-11-10T06:32:34,851 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c1b8f3fdb26c51dd07111269bf7b615a in 24ms, sequenceid=9, compaction requested=false
2024-11-10T06:32:34,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for c1b8f3fdb26c51dd07111269bf7b615a:
2024-11-10T06:32:34,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a.
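The Close-WAL-Writer-0 WARN records repeated throughout this section all come from the same probe: RecoverLeaseFSUtils invokes DistributedFileSystem.isFileClosed through reflection (visible as GeneratedMethodAccessor117 and Method.invoke in the traces) and retries roughly once per second, and every attempt fails because the DFSClient behind that FileSystem has already been shut down ("Filesystem closed"). A minimal sketch of that probe-and-retry pattern; it is not the actual RecoverLeaseFSUtils code, and the helper name, timeout parameter, and retry interval are illustrative:

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
  /** Probe isFileClosed until it returns true or the deadline passes. */
  static boolean probeUntilClosed(FileSystem fs, Path wal, long timeoutMs) throws InterruptedException {
    Method isFileClosed;
    try {
      // isFileClosed(Path) is looked up reflectively: it exists on DistributedFileSystem
      // but not on the generic FileSystem API.
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // not an HDFS DistributedFileSystem, nothing to probe
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, wal)) {
          return true; // lease recovered; the WAL file is closed on the NameNode
        }
      } catch (InvocationTargetException | IllegalAccessException e) {
        // This is the failure logged above: the cause is IOException("Filesystem closed")
        // because the client was shut down, so the probe is logged and retried.
      }
      Thread.sleep(1000L);
    }
    return false;
  }
}
```

In this run the probe can never succeed, since the filesystem it targets (port 40625, left over from an earlier mini-cluster) is already closed, which is why the identical WARN and stack trace recur once per second for both WAL files.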
2024-11-10T06:32:34,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-10T06:32:34,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-10T06:32:34,855 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-10T06:32:34,856 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 180 msec 2024-11-10T06:32:34,858 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 187 msec 2024-11-10T06:32:35,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:35,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:36,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:36,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:37,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:37,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:38,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:38,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:39,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:39,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:40,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:40,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:40,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta after 68037ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
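The block of WARN records above is the WAL close path retrying lease recovery: Close-WAL-Writer-0 calls RecoverLeaseFSUtils, which invokes DistributedFileSystem.isFileClosed() via reflection roughly once per second per WAL file and re-issues recoverLease() after a longer wait (the "attempt=2 ... after 68037ms" record). Every call fails with java.io.IOException: Filesystem closed because the mini-cluster's DFS client has already been shut down, so each attempt can never observe the file as closed. The sketch below shows the underlying HDFS calls in plain, non-reflective form; it is a simplified illustration of the pattern, with an assumed one-second poll interval, not the actual HBase utility.

```java
// Hedged sketch of the lease-recovery loop behind the stack traces above: ask the
// NameNode to recover the WAL file's lease, then poll isFileClosed() until it is true.
// The real RecoverLeaseFSUtils adds reflection, timeouts and pauses; the poll interval
// here is an assumption for illustration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  public static void recoverWalLease(Configuration conf, Path walFile) throws Exception {
    FileSystem fs = walFile.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return; // lease recovery only applies to HDFS
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // Both calls below throw IOException("Filesystem closed") once the underlying
    // DFSClient has been shut down -- exactly the failure the WARN records report.
    boolean recovered = dfs.recoverLease(walFile);
    while (!recovered) {
      Thread.sleep(1000L);                    // assumed poll interval
      recovered = dfs.isFileClosed(walFile);  // true once the NameNode has closed the file
    }
  }
}
```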
2024-11-10T06:32:40,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 after 68048ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T06:32:41,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:41,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:42,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:42,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:43,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:43,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:43,670 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-10T06:32:44,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:44,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:44,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-10T06:32:44,776 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-10T06:32:44,778 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C36725%2C1731220333730.1731220364778 2024-11-10T06:32:44,784 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:44,784 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:44,784 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:44,784 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:44,784 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:44,784 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/WALs/4999977c7e1b,36725,1731220333730/4999977c7e1b%2C36725%2C1731220333730.1731220334118 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/WALs/4999977c7e1b,36725,1731220333730/4999977c7e1b%2C36725%2C1731220333730.1731220364778 2024-11-10T06:32:44,785 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39235:39235),(127.0.0.1/127.0.0.1:40497:40497)] 2024-11-10T06:32:44,785 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/WALs/4999977c7e1b,36725,1731220333730/4999977c7e1b%2C36725%2C1731220333730.1731220334118 is not closed yet, will try archiving it next time 2024-11-10T06:32:44,786 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T06:32:44,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741833_1009 (size=5546) 2024-11-10T06:32:44,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741833_1009 (size=5546) 2024-11-10T06:32:44,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T06:32:44,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-10T06:32:44,788 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-10T06:32:44,789 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-10T06:32:44,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-10T06:32:44,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36725 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-10T06:32:44,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 2024-11-10T06:32:44,943 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing c1b8f3fdb26c51dd07111269bf7b615a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-10T06:32:44,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/34f2b20b614d4598810d1f3257ee2b18 is 1080, key is row0003/info:/1731220364777/Put/seqid=0 2024-11-10T06:32:44,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741840_1016 (size=6033) 2024-11-10T06:32:44,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741840_1016 (size=6033) 2024-11-10T06:32:44,953 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/34f2b20b614d4598810d1f3257ee2b18 2024-11-10T06:32:44,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/34f2b20b614d4598810d1f3257ee2b18 as hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/34f2b20b614d4598810d1f3257ee2b18 2024-11-10T06:32:44,965 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/34f2b20b614d4598810d1f3257ee2b18, entries=1, sequenceid=13, filesize=5.9 K 2024-11-10T06:32:44,966 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c1b8f3fdb26c51dd07111269bf7b615a in 23ms, sequenceid=13, compaction requested=true 2024-11-10T06:32:44,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for c1b8f3fdb26c51dd07111269bf7b615a: 2024-11-10T06:32:44,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 2024-11-10T06:32:44,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-10T06:32:44,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-10T06:32:44,971 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-10T06:32:44,971 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec 2024-11-10T06:32:44,973 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec 2024-11-10T06:32:45,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:45,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:46,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-10T06:32:54,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-10T06:32:54,826 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-10T06:32:54,826 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T06:32:54,827 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T06:32:54,828 DEBUG [Time-limited test {}] regionserver.HStore(1541): c1b8f3fdb26c51dd07111269bf7b615a/info is initiating minor compaction (all files) 2024-11-10T06:32:54,828 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T06:32:54,828 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:32:54,828 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of c1b8f3fdb26c51dd07111269bf7b615a/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 2024-11-10T06:32:54,828 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/0c3eb5adb4bd4702be3b65551b90e06a, hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/631276aa490f420db12d607598f0e6bb, hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/34f2b20b614d4598810d1f3257ee2b18] into tmpdir=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp, totalSize=17.7 K 2024-11-10T06:32:54,829 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 0c3eb5adb4bd4702be3b65551b90e06a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731220344603 2024-11-10T06:32:54,829 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 631276aa490f420db12d607598f0e6bb, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731220354667 2024-11-10T06:32:54,829 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 34f2b20b614d4598810d1f3257ee2b18, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731220364777 2024-11-10T06:32:54,841 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): c1b8f3fdb26c51dd07111269bf7b615a#info#compaction#44 average throughput is unlimited, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T06:32:54,841 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/486dd8f0fd1545b590f64134949bff36 is 1080, key is row0001/info:/1731220344603/Put/seqid=0 2024-11-10T06:32:54,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741841_1017 (size=8296) 2024-11-10T06:32:54,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741841_1017 (size=8296) 2024-11-10T06:32:54,852 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/486dd8f0fd1545b590f64134949bff36 as hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/486dd8f0fd1545b590f64134949bff36 2024-11-10T06:32:54,858 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1b8f3fdb26c51dd07111269bf7b615a/info of c1b8f3fdb26c51dd07111269bf7b615a into 486dd8f0fd1545b590f64134949bff36(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T06:32:54,858 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for c1b8f3fdb26c51dd07111269bf7b615a: 2024-11-10T06:32:54,860 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C36725%2C1731220333730.1731220374860 2024-11-10T06:32:54,866 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:54,866 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:54,866 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:54,866 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:54,866 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:32:54,866 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/WALs/4999977c7e1b,36725,1731220333730/4999977c7e1b%2C36725%2C1731220333730.1731220364778 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/WALs/4999977c7e1b,36725,1731220333730/4999977c7e1b%2C36725%2C1731220333730.1731220374860 2024-11-10T06:32:54,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741839_1015 (size=2520) 2024-11-10T06:32:54,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741839_1015 (size=2520) 2024-11-10T06:32:54,870 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39235:39235),(127.0.0.1/127.0.0.1:40497:40497)] 2024-11-10T06:32:54,870 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/WALs/4999977c7e1b,36725,1731220333730/4999977c7e1b%2C36725%2C1731220333730.1731220334118 to hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/oldWALs/4999977c7e1b%2C36725%2C1731220333730.1731220334118 2024-11-10T06:32:54,871 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T06:32:54,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T06:32:54,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-10T06:32:54,874 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-10T06:32:54,875 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-10T06:32:54,875 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-10T06:32:55,028 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36725 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-10T06:32:55,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 
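The entries above trace a full flush/compaction cycle for TestLogRolling-testCompactionRecordDoesntBlockRolling: FlushTableProcedure pid=11 flushes the memstore to a new HFile, ExploringCompactionPolicy selects all three store files (totalSize=17.7 K) for a minor compaction, the WAL is rolled and the old WAL archived, and a second flush procedure (pid=13/14) is queued. As a rough sketch only (not the test's actual driver code), the same cycle could be requested from a client through the public Admin API; the table name is taken from the log, while the configuration, class name and use of majorCompact are assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table =
          TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      admin.flush(table);        // triggers a FlushTableProcedure like pid=11/13 above
      admin.majorCompact(table); // asynchronously requests compaction of the accumulated HFiles
    }
  }
}

In the log the compaction is a minor one chosen by the region server itself; majorCompact here is only a stand-in for requesting compaction from the client side.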
2024-11-10T06:32:55,028 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing c1b8f3fdb26c51dd07111269bf7b615a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-10T06:32:55,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/443e92e3bb48496889c7fe695ba2d24c is 1080, key is row0000/info:/1731220374859/Put/seqid=0 2024-11-10T06:32:55,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741843_1019 (size=6033) 2024-11-10T06:32:55,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741843_1019 (size=6033) 2024-11-10T06:32:55,039 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/443e92e3bb48496889c7fe695ba2d24c 2024-11-10T06:32:55,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/443e92e3bb48496889c7fe695ba2d24c as hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/443e92e3bb48496889c7fe695ba2d24c 2024-11-10T06:32:55,050 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/443e92e3bb48496889c7fe695ba2d24c, entries=1, sequenceid=18, filesize=5.9 K 2024-11-10T06:32:55,051 INFO [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c1b8f3fdb26c51dd07111269bf7b615a in 23ms, sequenceid=18, compaction requested=false 2024-11-10T06:32:55,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for c1b8f3fdb26c51dd07111269bf7b615a: 2024-11-10T06:32:55,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 
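The recurring Close-WAL-Writer-0 WARNs in this section (they resume just below) come from WAL lease recovery: after a writer is closed, the code polls HDFS roughly once per second to check whether the old WAL file has been closed, and here every poll fails with java.io.IOException: Filesystem closed because the DFSClient behind hdfs://localhost:40625 has already been shut down. The utility invokes isFileClosed reflectively (hence the InvocationTargetException wrapper in the traces), logs the WARN, and retries. A minimal sketch of that polling pattern, using only the public DistributedFileSystem API rather than HBase's RecoverLeaseFSUtils internals (the method name, timeout and interval below are illustrative assumptions):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Poll until HDFS reports the file closed, mirroring the ~1 s retry cadence
  // visible in the Close-WAL-Writer-0 WARN entries in this section.
  static boolean waitForFileClosed(DistributedFileSystem dfs, Path walFile, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(walFile);   // ask the NameNode to start lease recovery
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);                           // retry interval
      recovered = dfs.isFileClosed(walFile);         // the call that throws once the DFS client is closed
    }
    return recovered;
  }
}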
2024-11-10T06:32:55,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-10T06:32:55,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-10T06:32:55,056 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-10T06:32:55,056 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec 2024-11-10T06:32:55,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec 2024-11-10T06:32:55,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:32:55,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:55,481 INFO [master/4999977c7e1b:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-10T06:32:55,481 INFO [master/4999977c7e1b:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-10T06:32:56,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:56,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:57,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:57,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:58,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:58,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:59,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:59,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:32:59,905 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c1b8f3fdb26c51dd07111269bf7b615a, had cached 0 bytes from a total of 14329 2024-11-10T06:33:00,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:00,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:01,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:01,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:02,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:02,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:03,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:03,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:04,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:04,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:33:04,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45029 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-10T06:33:04,946 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-10T06:33:04,948 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C36725%2C1731220333730.1731220384948 2024-11-10T06:33:04,954 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:04,954 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:04,955 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:04,955 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:04,955 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:04,955 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/WALs/4999977c7e1b,36725,1731220333730/4999977c7e1b%2C36725%2C1731220333730.1731220374860 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/WALs/4999977c7e1b,36725,1731220333730/4999977c7e1b%2C36725%2C1731220333730.1731220384948 2024-11-10T06:33:04,956 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39235:39235),(127.0.0.1/127.0.0.1:40497:40497)] 2024-11-10T06:33:04,956 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/WALs/4999977c7e1b,36725,1731220333730/4999977c7e1b%2C36725%2C1731220333730.1731220374860 is not closed yet, will try archiving it next time 2024-11-10T06:33:04,956 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/WALs/4999977c7e1b,36725,1731220333730/4999977c7e1b%2C36725%2C1731220333730.1731220364778 to hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/oldWALs/4999977c7e1b%2C36725%2C1731220333730.1731220364778 2024-11-10T06:33:04,956 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T06:33:04,956 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
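The once-per-second WARN entries from RecoverLeaseFSUtils above come from a lease-recovery loop on the old cluster's WAL files: each attempt probes DistributedFileSystem.isFileClosed (via reflection, per the stack trace) and fails with "Filesystem closed" because the DFSClient behind that FileSystem has already been shut down, so the utility keeps retrying. A conceptual sketch of such a recover-then-poll loop, without the reflection and simplified relative to the real RecoverLeaseFSUtils:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  /** Returns true once the WAL file's lease is recovered or the file is closed. */
  public static boolean recover(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
          return true;
        }
      } catch (IOException e) {
        // e.g. "Filesystem closed" when the DFS client was shut down first;
        // the real utility logs this as a WARN and retries, as seen above.
      }
      Thread.sleep(1000L); // matches the roughly one-second retry cadence in the log
    }
    return false;
  }
}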
2024-11-10T06:33:04,956 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:33:04,956 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:33:04,956 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:33:04,956 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
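The call stack above shows where this shutdown originates: the JUnit tear-down of AbstractTestLogRolling calls HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then stops the mini HBase cluster. A sketch of that tear-down shape, assumed rather than copied from the test class:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class LogRollingTearDownSketch {
  // Assumed field; the real test keeps its utility in the abstract base class.
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Closes the shared async connection, shuts down the mini HBase cluster,
    // then the mini DFS and ZooKeeper it runs on.
    TEST_UTIL.shutdownMiniCluster();
  }
}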
2024-11-10T06:33:04,957 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T06:33:04,957 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1520150268, stopped=false 2024-11-10T06:33:04,957 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=4999977c7e1b,45029,1731220333686 2024-11-10T06:33:04,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741842_1018 (size=2026) 2024-11-10T06:33:04,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741842_1018 (size=2026) 2024-11-10T06:33:04,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T06:33:04,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T06:33:04,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:33:04,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:33:04,959 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T06:33:04,959 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
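Shutdown is propagated through ZooKeeper: deleting the /hbase/running znode produces the NodeDeleted events that the master and region server ZKWatcher instances log above. A conceptual sketch using the plain ZooKeeper client (HBase itself goes through ZKWatcher/ZKUtil) to watch that znode, with the quorum address taken from the log:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Quorum address taken from the log; the session timeout is an arbitrary choice here.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59608", 30_000, event -> { });
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        System.out.println("Cluster shutdown requested: /hbase/running was deleted");
      }
    };
    // exists() registers a one-shot watch whether or not the znode is currently present.
    zk.exists("/hbase/running", watcher);
    Thread.sleep(60_000); // keep the session alive long enough to observe the event
  }
}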
2024-11-10T06:33:04,959 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:33:04,959 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:33:04,959 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '4999977c7e1b,36725,1731220333730' ***** 2024-11-10T06:33:04,959 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T06:33:04,959 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:33:04,960 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:33:04,960 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T06:33:04,960 INFO [RS:0;4999977c7e1b:36725 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T06:33:04,960 INFO [RS:0;4999977c7e1b:36725 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T06:33:04,960 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(3091): Received CLOSE for c1b8f3fdb26c51dd07111269bf7b615a 2024-11-10T06:33:04,960 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T06:33:04,960 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(959): stopping server 4999977c7e1b,36725,1731220333730 2024-11-10T06:33:04,960 INFO [RS:0;4999977c7e1b:36725 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T06:33:04,960 INFO [RS:0;4999977c7e1b:36725 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;4999977c7e1b:36725. 2024-11-10T06:33:04,960 DEBUG [RS:0;4999977c7e1b:36725 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:33:04,960 DEBUG [RS:0;4999977c7e1b:36725 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:33:04,960 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c1b8f3fdb26c51dd07111269bf7b615a, disabling compactions & flushes 2024-11-10T06:33:04,960 INFO [RS:0;4999977c7e1b:36725 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T06:33:04,960 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 2024-11-10T06:33:04,960 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T06:33:04,960 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T06:33:04,960 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 2024-11-10T06:33:04,960 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. after waiting 0 ms 2024-11-10T06:33:04,960 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 2024-11-10T06:33:04,960 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T06:33:04,961 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing c1b8f3fdb26c51dd07111269bf7b615a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-10T06:33:04,961 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-10T06:33:04,961 DEBUG [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(1325): Online Regions={c1b8f3fdb26c51dd07111269bf7b615a=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a., 1588230740=hbase:meta,,1.1588230740} 2024-11-10T06:33:04,961 DEBUG [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c1b8f3fdb26c51dd07111269bf7b615a 2024-11-10T06:33:04,961 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T06:33:04,961 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T06:33:04,961 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T06:33:04,961 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T06:33:04,961 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T06:33:04,961 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-10T06:33:04,966 DEBUG 
[RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/c671e778863943a79b827660d6e8287d is 1080, key is row0001/info:/1731220384947/Put/seqid=0 2024-11-10T06:33:04,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741845_1021 (size=6033) 2024-11-10T06:33:04,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741845_1021 (size=6033) 2024-11-10T06:33:04,971 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/c671e778863943a79b827660d6e8287d 2024-11-10T06:33:04,977 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/.tmp/info/c671e778863943a79b827660d6e8287d as hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/c671e778863943a79b827660d6e8287d 2024-11-10T06:33:04,982 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/c671e778863943a79b827660d6e8287d, entries=1, sequenceid=22, filesize=5.9 K 2024-11-10T06:33:04,983 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c1b8f3fdb26c51dd07111269bf7b615a in 23ms, sequenceid=22, compaction requested=true 2024-11-10T06:33:04,984 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/0c3eb5adb4bd4702be3b65551b90e06a, hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/631276aa490f420db12d607598f0e6bb, hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/34f2b20b614d4598810d1f3257ee2b18] to archive 2024-11-10T06:33:04,985 DEBUG 
[StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-10T06:33:04,985 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/.tmp/info/8092ae5ccbee49cfa066ba002c55b6ca is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a./info:regioninfo/1731220334918/Put/seqid=0 2024-11-10T06:33:04,986 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/0c3eb5adb4bd4702be3b65551b90e06a to hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/0c3eb5adb4bd4702be3b65551b90e06a 2024-11-10T06:33:04,988 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/631276aa490f420db12d607598f0e6bb to hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/631276aa490f420db12d607598f0e6bb 2024-11-10T06:33:04,989 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/34f2b20b614d4598810d1f3257ee2b18 to hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/info/34f2b20b614d4598810d1f3257ee2b18 2024-11-10T06:33:04,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741846_1022 (size=7308) 2024-11-10T06:33:04,989 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=4999977c7e1b:45029 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-10T06:33:04,990 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [0c3eb5adb4bd4702be3b65551b90e06a=6033, 631276aa490f420db12d607598f0e6bb=6033, 34f2b20b614d4598810d1f3257ee2b18=6033] 2024-11-10T06:33:04,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741846_1022 (size=7308) 2024-11-10T06:33:04,991 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/.tmp/info/8092ae5ccbee49cfa066ba002c55b6ca 2024-11-10T06:33:04,994 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c1b8f3fdb26c51dd07111269bf7b615a/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-10T06:33:04,994 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 2024-11-10T06:33:04,995 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c1b8f3fdb26c51dd07111269bf7b615a: Waiting for close lock at 1731220384960Running coprocessor pre-close hooks at 1731220384960Disabling compacts and flushes for region at 1731220384960Disabling writes for close at 1731220384960Obtaining lock to block concurrent updates at 1731220384961 (+1 ms)Preparing flush snapshotting stores in c1b8f3fdb26c51dd07111269bf7b615a at 1731220384961Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731220384961Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. at 1731220384961Flushing c1b8f3fdb26c51dd07111269bf7b615a/info: creating writer at 1731220384962 (+1 ms)Flushing c1b8f3fdb26c51dd07111269bf7b615a/info: appending metadata at 1731220384965 (+3 ms)Flushing c1b8f3fdb26c51dd07111269bf7b615a/info: closing flushed file at 1731220384965Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1bea174a: reopening flushed file at 1731220384976 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c1b8f3fdb26c51dd07111269bf7b615a in 23ms, sequenceid=22, compaction requested=true at 1731220384983 (+7 ms)Writing region close event to WAL at 1731220384990 (+7 ms)Running coprocessor post-close hooks at 1731220384994 (+4 ms)Closed at 1731220384994 2024-11-10T06:33:04,995 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731220334562.c1b8f3fdb26c51dd07111269bf7b615a. 
2024-11-10T06:33:05,010 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/.tmp/ns/5ee9e24ef8ba42d9af80f07d2c4921b1 is 43, key is default/ns:d/1731220334529/Put/seqid=0 2024-11-10T06:33:05,011 INFO [regionserver/4999977c7e1b:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-10T06:33:05,011 INFO [regionserver/4999977c7e1b:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-10T06:33:05,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741847_1023 (size=5153) 2024-11-10T06:33:05,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741847_1023 (size=5153) 2024-11-10T06:33:05,016 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/.tmp/ns/5ee9e24ef8ba42d9af80f07d2c4921b1 2024-11-10T06:33:05,035 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/.tmp/table/3f2e11652645474ca38a75db96ef0a56 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731220334929/Put/seqid=0 2024-11-10T06:33:05,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741848_1024 (size=5508) 2024-11-10T06:33:05,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741848_1024 (size=5508) 2024-11-10T06:33:05,040 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/.tmp/table/3f2e11652645474ca38a75db96ef0a56 2024-11-10T06:33:05,046 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/.tmp/info/8092ae5ccbee49cfa066ba002c55b6ca as hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/info/8092ae5ccbee49cfa066ba002c55b6ca 2024-11-10T06:33:05,050 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/info/8092ae5ccbee49cfa066ba002c55b6ca, entries=10, sequenceid=11, filesize=7.1 K 2024-11-10T06:33:05,051 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/.tmp/ns/5ee9e24ef8ba42d9af80f07d2c4921b1 as hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/ns/5ee9e24ef8ba42d9af80f07d2c4921b1 2024-11-10T06:33:05,055 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/ns/5ee9e24ef8ba42d9af80f07d2c4921b1, entries=2, sequenceid=11, filesize=5.0 K 2024-11-10T06:33:05,056 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/.tmp/table/3f2e11652645474ca38a75db96ef0a56 as hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/table/3f2e11652645474ca38a75db96ef0a56 2024-11-10T06:33:05,061 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/table/3f2e11652645474ca38a75db96ef0a56, entries=2, sequenceid=11, filesize=5.4 K 2024-11-10T06:33:05,062 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 101ms, sequenceid=11, compaction requested=false 2024-11-10T06:33:05,066 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-10T06:33:05,067 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T06:33:05,067 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T06:33:05,067 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731220384961Running coprocessor pre-close hooks at 1731220384961Disabling compacts and flushes for region at 1731220384961Disabling writes for close at 1731220384961Obtaining lock to block concurrent updates at 1731220384961Preparing flush snapshotting stores in 1588230740 at 1731220384961Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731220384961Flushing stores of hbase:meta,,1.1588230740 at 1731220384962 (+1 ms)Flushing 1588230740/info: creating writer at 1731220384962Flushing 1588230740/info: appending metadata at 1731220384985 (+23 ms)Flushing 1588230740/info: closing flushed file at 1731220384985Flushing 1588230740/ns: creating writer at 1731220384996 (+11 ms)Flushing 1588230740/ns: appending metadata at 1731220385010 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731220385010Flushing 
1588230740/table: creating writer at 1731220385020 (+10 ms)Flushing 1588230740/table: appending metadata at 1731220385034 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731220385034Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27cac80f: reopening flushed file at 1731220385045 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@647e6847: reopening flushed file at 1731220385050 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@91c2a8b: reopening flushed file at 1731220385056 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 101ms, sequenceid=11, compaction requested=false at 1731220385062 (+6 ms)Writing region close event to WAL at 1731220385063 (+1 ms)Running coprocessor post-close hooks at 1731220385067 (+4 ms)Closed at 1731220385067 2024-11-10T06:33:05,067 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T06:33:05,161 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(976): stopping server 4999977c7e1b,36725,1731220333730; all regions closed. 2024-11-10T06:33:05,161 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:05,162 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:05,162 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:05,162 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:05,162 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:05,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741834_1010 (size=3306) 2024-11-10T06:33:05,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741834_1010 (size=3306) 2024-11-10T06:33:05,166 DEBUG [RS:0;4999977c7e1b:36725 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/oldWALs 2024-11-10T06:33:05,166 INFO [RS:0;4999977c7e1b:36725 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4999977c7e1b%2C36725%2C1731220333730.meta:.meta(num 1731220334490) 2024-11-10T06:33:05,167 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:05,167 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:05,167 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:05,167 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:05,167 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:05,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741844_1020 (size=1252) 2024-11-10T06:33:05,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741844_1020 (size=1252) 2024-11-10T06:33:05,171 DEBUG [RS:0;4999977c7e1b:36725 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/oldWALs 2024-11-10T06:33:05,171 INFO [RS:0;4999977c7e1b:36725 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4999977c7e1b%2C36725%2C1731220333730:(num 1731220384948) 2024-11-10T06:33:05,171 DEBUG [RS:0;4999977c7e1b:36725 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:33:05,171 INFO 
[RS:0;4999977c7e1b:36725 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T06:33:05,172 INFO [RS:0;4999977c7e1b:36725 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T06:33:05,172 INFO [RS:0;4999977c7e1b:36725 {}] hbase.ChoreService(370): Chore service for: regionserver/4999977c7e1b:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-10T06:33:05,172 INFO [RS:0;4999977c7e1b:36725 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T06:33:05,172 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T06:33:05,172 INFO [RS:0;4999977c7e1b:36725 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36725 2024-11-10T06:33:05,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4999977c7e1b,36725,1731220333730 2024-11-10T06:33:05,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T06:33:05,174 INFO [RS:0;4999977c7e1b:36725 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T06:33:05,176 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4999977c7e1b,36725,1731220333730] 2024-11-10T06:33:05,177 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4999977c7e1b,36725,1731220333730 already deleted, retry=false 2024-11-10T06:33:05,177 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4999977c7e1b,36725,1731220333730 expired; onlineServers=0 2024-11-10T06:33:05,177 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '4999977c7e1b,45029,1731220333686' ***** 2024-11-10T06:33:05,177 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T06:33:05,177 INFO [M:0;4999977c7e1b:45029 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T06:33:05,177 INFO [M:0;4999977c7e1b:45029 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T06:33:05,177 DEBUG [M:0;4999977c7e1b:45029 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T06:33:05,177 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-10T06:33:05,177 DEBUG [M:0;4999977c7e1b:45029 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T06:33:05,177 DEBUG [master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220333891 {}] cleaner.HFileCleaner(306): Exit Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220333891,5,FailOnTimeoutGroup] 2024-11-10T06:33:05,177 DEBUG [master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220333891 {}] cleaner.HFileCleaner(306): Exit Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220333891,5,FailOnTimeoutGroup] 2024-11-10T06:33:05,177 INFO [M:0;4999977c7e1b:45029 {}] hbase.ChoreService(370): Chore service for: master/4999977c7e1b:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T06:33:05,177 INFO [M:0;4999977c7e1b:45029 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T06:33:05,177 DEBUG [M:0;4999977c7e1b:45029 {}] master.HMaster(1795): Stopping service threads 2024-11-10T06:33:05,177 INFO [M:0;4999977c7e1b:45029 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T06:33:05,178 INFO [M:0;4999977c7e1b:45029 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T06:33:05,178 INFO [M:0;4999977c7e1b:45029 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T06:33:05,178 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T06:33:05,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T06:33:05,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:33:05,179 DEBUG [M:0;4999977c7e1b:45029 {}] zookeeper.ZKUtil(347): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T06:33:05,179 WARN [M:0;4999977c7e1b:45029 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T06:33:05,179 INFO [M:0;4999977c7e1b:45029 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/.lastflushedseqids 2024-11-10T06:33:05,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741849_1025 (size=130) 2024-11-10T06:33:05,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741849_1025 (size=130) 2024-11-10T06:33:05,185 INFO [M:0;4999977c7e1b:45029 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T06:33:05,185 INFO [M:0;4999977c7e1b:45029 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T06:33:05,185 DEBUG [M:0;4999977c7e1b:45029 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T06:33:05,185 INFO [M:0;4999977c7e1b:45029 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:33:05,185 DEBUG [M:0;4999977c7e1b:45029 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:33:05,185 DEBUG [M:0;4999977c7e1b:45029 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T06:33:05,185 DEBUG [M:0;4999977c7e1b:45029 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:33:05,185 INFO [M:0;4999977c7e1b:45029 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-11-10T06:33:05,201 DEBUG [M:0;4999977c7e1b:45029 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f222794dd6f14e688286e003556f838d is 82, key is hbase:meta,,1/info:regioninfo/1731220334514/Put/seqid=0 2024-11-10T06:33:05,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741850_1026 (size=5672) 2024-11-10T06:33:05,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741850_1026 (size=5672) 2024-11-10T06:33:05,206 INFO [M:0;4999977c7e1b:45029 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f222794dd6f14e688286e003556f838d 2024-11-10T06:33:05,225 DEBUG [M:0;4999977c7e1b:45029 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1b56c6a3d7da4148bd51b16401933720 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731220334933/Put/seqid=0 2024-11-10T06:33:05,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741851_1027 (size=7823) 2024-11-10T06:33:05,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741851_1027 (size=7823) 2024-11-10T06:33:05,231 INFO [M:0;4999977c7e1b:45029 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1b56c6a3d7da4148bd51b16401933720 2024-11-10T06:33:05,235 INFO [M:0;4999977c7e1b:45029 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1b56c6a3d7da4148bd51b16401933720 2024-11-10T06:33:05,250 DEBUG [M:0;4999977c7e1b:45029 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/435a06cf0b3a44709a03e69e59783704 is 69, key is 4999977c7e1b,36725,1731220333730/rs:state/1731220333969/Put/seqid=0 2024-11-10T06:33:05,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741852_1028 (size=5156) 2024-11-10T06:33:05,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741852_1028 (size=5156) 2024-11-10T06:33:05,255 INFO [M:0;4999977c7e1b:45029 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/435a06cf0b3a44709a03e69e59783704 2024-11-10T06:33:05,273 DEBUG [M:0;4999977c7e1b:45029 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/97b242c2fafb46d6b52d4ef9580b5a07 is 52, key is load_balancer_on/state:d/1731220334559/Put/seqid=0 2024-11-10T06:33:05,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:33:05,276 INFO [RS:0;4999977c7e1b:36725 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T06:33:05,276 INFO [RS:0;4999977c7e1b:36725 {}] regionserver.HRegionServer(1031): Exiting; stopping=4999977c7e1b,36725,1731220333730; zookeeper connection closed. 
2024-11-10T06:33:05,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36725-0x10190e159d00001, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:33:05,276 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6844f6f5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6844f6f5 2024-11-10T06:33:05,277 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-10T06:33:05,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741853_1029 (size=5056) 2024-11-10T06:33:05,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741853_1029 (size=5056) 2024-11-10T06:33:05,278 INFO [M:0;4999977c7e1b:45029 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/97b242c2fafb46d6b52d4ef9580b5a07 2024-11-10T06:33:05,283 DEBUG [M:0;4999977c7e1b:45029 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f222794dd6f14e688286e003556f838d as hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f222794dd6f14e688286e003556f838d 2024-11-10T06:33:05,288 INFO [M:0;4999977c7e1b:45029 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f222794dd6f14e688286e003556f838d, entries=8, sequenceid=121, filesize=5.5 K 2024-11-10T06:33:05,289 DEBUG [M:0;4999977c7e1b:45029 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1b56c6a3d7da4148bd51b16401933720 as hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1b56c6a3d7da4148bd51b16401933720 2024-11-10T06:33:05,294 INFO [M:0;4999977c7e1b:45029 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1b56c6a3d7da4148bd51b16401933720 2024-11-10T06:33:05,294 INFO [M:0;4999977c7e1b:45029 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1b56c6a3d7da4148bd51b16401933720, entries=14, sequenceid=121, filesize=7.6 K 2024-11-10T06:33:05,295 DEBUG [M:0;4999977c7e1b:45029 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/435a06cf0b3a44709a03e69e59783704 as 
hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/435a06cf0b3a44709a03e69e59783704 2024-11-10T06:33:05,299 INFO [M:0;4999977c7e1b:45029 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/435a06cf0b3a44709a03e69e59783704, entries=1, sequenceid=121, filesize=5.0 K 2024-11-10T06:33:05,300 DEBUG [M:0;4999977c7e1b:45029 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/97b242c2fafb46d6b52d4ef9580b5a07 as hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/97b242c2fafb46d6b52d4ef9580b5a07 2024-11-10T06:33:05,304 INFO [M:0;4999977c7e1b:45029 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40549/user/jenkins/test-data/a8602ecd-14dd-8a7b-4115-7d44161e4014/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/97b242c2fafb46d6b52d4ef9580b5a07, entries=1, sequenceid=121, filesize=4.9 K 2024-11-10T06:33:05,305 INFO [M:0;4999977c7e1b:45029 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 120ms, sequenceid=121, compaction requested=false 2024-11-10T06:33:05,307 INFO [M:0;4999977c7e1b:45029 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:33:05,307 DEBUG [M:0;4999977c7e1b:45029 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731220385185Disabling compacts and flushes for region at 1731220385185Disabling writes for close at 1731220385185Obtaining lock to block concurrent updates at 1731220385185Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731220385185Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44638, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1731220385185Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731220385186 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731220385186Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731220385201 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731220385201Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731220385210 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731220385225 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731220385225Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731220385235 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731220385249 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731220385249Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731220385259 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731220385273 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731220385273Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42cc7d5b: reopening flushed file at 1731220385283 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@577a97f9: reopening flushed file at 1731220385288 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a30ccee: reopening flushed file at 1731220385294 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@21a61796: reopening flushed file at 1731220385299 (+5 ms)Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 120ms, sequenceid=121, compaction requested=false at 1731220385305 (+6 ms)Writing region close event to WAL at 1731220385307 (+2 ms)Closed at 1731220385307 2024-11-10T06:33:05,307 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:05,307 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:05,308 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:05,308 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:05,308 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:33:05,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741830_1006 (size=53035) 2024-11-10T06:33:05,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41021 is added to blk_1073741830_1006 (size=53035) 2024-11-10T06:33:05,310 INFO [M:0;4999977c7e1b:45029 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-10T06:33:05,310 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T06:33:05,310 INFO [M:0;4999977c7e1b:45029 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45029 2024-11-10T06:33:05,311 INFO [M:0;4999977c7e1b:45029 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T06:33:05,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:33:05,412 INFO [M:0;4999977c7e1b:45029 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T06:33:05,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45029-0x10190e159d00000, quorum=127.0.0.1:59608, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:33:05,415 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@38bb5d1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:33:05,415 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@26789c81{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:33:05,415 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:33:05,415 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78677773{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:33:05,415 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78346695{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/hadoop.log.dir/,STOPPED} 2024-11-10T06:33:05,417 WARN [BP-763120891-172.17.0.2-1731220333038 heartbeating to localhost/127.0.0.1:40549 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:33:05,417 WARN [BP-763120891-172.17.0.2-1731220333038 heartbeating to localhost/127.0.0.1:40549 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-763120891-172.17.0.2-1731220333038 (Datanode Uuid 58f00131-e7cb-4228-a7c8-956d7c65e8a4) service to localhost/127.0.0.1:40549 2024-11-10T06:33:05,417 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T06:33:05,417 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:33:05,418 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/cluster_8b4a91a1-e44b-f735-838e-42cef32803bf/data/data3/current/BP-763120891-172.17.0.2-1731220333038 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:33:05,418 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/cluster_8b4a91a1-e44b-f735-838e-42cef32803bf/data/data4/current/BP-763120891-172.17.0.2-1731220333038 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:33:05,418 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:33:05,422 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@44ad4d72{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:33:05,422 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3020717b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:33:05,422 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:33:05,422 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1357824d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:33:05,422 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@432cb77c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/hadoop.log.dir/,STOPPED} 2024-11-10T06:33:05,424 WARN [BP-763120891-172.17.0.2-1731220333038 heartbeating to localhost/127.0.0.1:40549 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:33:05,424 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T06:33:05,424 WARN [BP-763120891-172.17.0.2-1731220333038 heartbeating to localhost/127.0.0.1:40549 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-763120891-172.17.0.2-1731220333038 (Datanode Uuid 6ed51e31-d9f5-477a-b4b4-f1c0f6ae70f5) service to localhost/127.0.0.1:40549
2024-11-10T06:33:05,424 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-10T06:33:05,424 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/cluster_8b4a91a1-e44b-f735-838e-42cef32803bf/data/data1/current/BP-763120891-172.17.0.2-1731220333038 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T06:33:05,425 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/cluster_8b4a91a1-e44b-f735-838e-42cef32803bf/data/data2/current/BP-763120891-172.17.0.2-1731220333038 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T06:33:05,425 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-10T06:33:05,431 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@fa7e208{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-10T06:33:05,432 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@db2e6f4{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-10T06:33:05,432 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-10T06:33:05,432 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7721c444{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-10T06:33:05,432 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30ab2a92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/hadoop.log.dir/,STOPPED}
2024-11-10T06:33:05,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:33:05,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:33:05,438 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-10T06:33:05,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-10T06:33:05,463 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-35-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-35-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:40549 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-37-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-34-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-12-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-12-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-35-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40549
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:40549 from jenkins.hfs.5
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-37-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-36-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40549
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-36-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-37-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-12-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-34-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:40549
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:40549 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40549
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-36-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: regionserver/4999977c7e1b:0.leaseChecker
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:40549
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-34-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=485 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=40 (was 92), ProcessCount=11 (was 11), AvailableMemoryMB=6992 (was 7034)
2024-11-10T06:33:05,471 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=485, MaxFileDescriptor=1048576, SystemLoadAverage=40, ProcessCount=11, AvailableMemoryMB=6992
2024-11-10T06:33:05,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-10T06:33:05,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/hadoop.log.dir so I do NOT create it in target/test-data/579b6725-10db-1652-b2fb-64f330c8e350
2024-11-10T06:33:05,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ce366b87-3013-a2af-74ca-12f9bf2fc713/hadoop.tmp.dir so I do NOT create it in target/test-data/579b6725-10db-1652-b2fb-64f330c8e350
2024-11-10T06:33:05,471 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/cluster_ef24c530-3e5a-da26-5472-37cd2ee79f54, deleteOnExit=true
2024-11-10T06:33:05,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-10T06:33:05,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/test.cache.data in system properties and HBase conf
2024-11-10T06:33:05,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T06:33:05,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/hadoop.log.dir in system properties and HBase conf 2024-11-10T06:33:05,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T06:33:05,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T06:33:05,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T06:33:05,472 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-10T06:33:05,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T06:33:05,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T06:33:05,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T06:33:05,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T06:33:05,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T06:33:05,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T06:33:05,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T06:33:05,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T06:33:05,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T06:33:05,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/nfs.dump.dir in system properties and HBase conf 2024-11-10T06:33:05,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/java.io.tmpdir in system properties and HBase conf 2024-11-10T06:33:05,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T06:33:05,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T06:33:05,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T06:33:05,486 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 
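The option string logged above for testLogRolling (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}) is the toString of the option object a test hands to HBaseTestingUtil when it starts a fresh minicluster. A minimal hypothetical sketch of how such setup code might look, assuming a builder-style StartMiniClusterOption API suggested by the logged field names (the builder method names are an assumption, not confirmed by this log):

// Hypothetical setup sketch in Java, not taken from this log.
// Class names (HBaseTestingUtil, StartMiniClusterOption) appear in the entries above;
// the builder methods mirror the logged fields and are assumed rather than verified here.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static HBaseTestingUtil startCluster() throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // numMasters=1 in the logged option string
        .numRegionServers(1)  // numRegionServers=1
        .numDataNodes(2)      // numDataNodes=2
        .numZkServers(1)      // numZkServers=1
        .build();
    // Logs "Starting up minicluster with option: ..." and then brings up DFS,
    // the MiniZooKeeperCluster, the HMaster and the region server recorded in the
    // entries that follow.
    util.startMiniCluster(option);
    return util;
  }
}

The same utility instance is what later logs "Minicluster is down" when the test tears the cluster back down, as seen earlier in this section.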
2024-11-10T06:33:05,498 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T06:33:05,498 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-10T06:33:05,499 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-10T06:33:05,499 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T06:33:05,539 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:33:05,543 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:33:05,544 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:33:05,544 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:33:05,544 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T06:33:05,544 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:33:05,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f89d121{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:33:05,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13bc5e1d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:33:05,661 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@34478999{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/java.io.tmpdir/jetty-localhost-34711-hadoop-hdfs-3_4_1-tests_jar-_-any-6558470785583476913/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T06:33:05,662 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ffb9dd1{HTTP/1.1, (http/1.1)}{localhost:34711} 2024-11-10T06:33:05,662 INFO [Time-limited test {}] server.Server(415): Started @235362ms 2024-11-10T06:33:05,675 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T06:33:05,722 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:33:05,725 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:33:05,726 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:33:05,726 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:33:05,726 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T06:33:05,744 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42424a91{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:33:05,744 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41b0b61{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:33:05,857 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c013a6e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/java.io.tmpdir/jetty-localhost-42849-hadoop-hdfs-3_4_1-tests_jar-_-any-2466853555214077750/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:33:05,857 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1798c27d{HTTP/1.1, (http/1.1)}{localhost:42849} 2024-11-10T06:33:05,857 INFO [Time-limited test {}] server.Server(415): Started @235558ms 2024-11-10T06:33:05,859 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T06:33:05,888 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:33:05,891 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:33:05,891 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:33:05,891 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:33:05,891 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T06:33:05,892 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a96e527{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:33:05,892 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57641b80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:33:05,940 WARN [Thread-1950 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/cluster_ef24c530-3e5a-da26-5472-37cd2ee79f54/data/data1/current/BP-377058856-172.17.0.2-1731220385492/current, will proceed with Du for space computation calculation, 2024-11-10T06:33:05,940 WARN [Thread-1951 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/cluster_ef24c530-3e5a-da26-5472-37cd2ee79f54/data/data2/current/BP-377058856-172.17.0.2-1731220385492/current, will proceed with Du for space computation calculation, 2024-11-10T06:33:05,956 WARN [Thread-1929 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T06:33:05,958 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ad191780ff138ce with lease ID 0x8fe3745b1dd581d3: Processing first storage report for DS-3388df5c-9ea0-4b12-981b-6f09e87aae20 from datanode DatanodeRegistration(127.0.0.1:34635, datanodeUuid=e2ce63fb-ae1d-4b2f-89cb-747b93b25985, infoPort=34843, infoSecurePort=0, ipcPort=36405, storageInfo=lv=-57;cid=testClusterID;nsid=2120962463;c=1731220385492) 2024-11-10T06:33:05,958 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ad191780ff138ce with lease ID 0x8fe3745b1dd581d3: from storage DS-3388df5c-9ea0-4b12-981b-6f09e87aae20 node DatanodeRegistration(127.0.0.1:34635, datanodeUuid=e2ce63fb-ae1d-4b2f-89cb-747b93b25985, infoPort=34843, infoSecurePort=0, ipcPort=36405, storageInfo=lv=-57;cid=testClusterID;nsid=2120962463;c=1731220385492), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:33:05,959 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ad191780ff138ce with lease ID 0x8fe3745b1dd581d3: Processing first storage report for DS-5ad88102-294b-42cd-89ca-c2779dbcc083 from datanode DatanodeRegistration(127.0.0.1:34635, datanodeUuid=e2ce63fb-ae1d-4b2f-89cb-747b93b25985, infoPort=34843, infoSecurePort=0, ipcPort=36405, storageInfo=lv=-57;cid=testClusterID;nsid=2120962463;c=1731220385492) 2024-11-10T06:33:05,959 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ad191780ff138ce with lease ID 0x8fe3745b1dd581d3: from storage DS-5ad88102-294b-42cd-89ca-c2779dbcc083 node DatanodeRegistration(127.0.0.1:34635, datanodeUuid=e2ce63fb-ae1d-4b2f-89cb-747b93b25985, infoPort=34843, infoSecurePort=0, ipcPort=36405, storageInfo=lv=-57;cid=testClusterID;nsid=2120962463;c=1731220385492), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:33:05,986 INFO [regionserver/4999977c7e1b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T06:33:06,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2bf4ff10{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/java.io.tmpdir/jetty-localhost-33353-hadoop-hdfs-3_4_1-tests_jar-_-any-15626157095466810701/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:33:06,009 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6ccca08b{HTTP/1.1, (http/1.1)}{localhost:33353} 2024-11-10T06:33:06,009 INFO [Time-limited test {}] server.Server(415): Started @235710ms 2024-11-10T06:33:06,010 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-10T06:33:06,098 WARN [Thread-1977 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/cluster_ef24c530-3e5a-da26-5472-37cd2ee79f54/data/data4/current/BP-377058856-172.17.0.2-1731220385492/current, will proceed with Du for space computation calculation, 2024-11-10T06:33:06,098 WARN [Thread-1976 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/cluster_ef24c530-3e5a-da26-5472-37cd2ee79f54/data/data3/current/BP-377058856-172.17.0.2-1731220385492/current, will proceed with Du for space computation calculation, 2024-11-10T06:33:06,114 WARN [Thread-1965 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T06:33:06,116 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x875e9fb525758dd5 with lease ID 0x8fe3745b1dd581d4: Processing first storage report for DS-1f019b69-2467-48b0-acc8-b6371b1f86fb from datanode DatanodeRegistration(127.0.0.1:45029, datanodeUuid=ab94b412-564b-4790-bf2f-58b43a9ddb36, infoPort=32853, infoSecurePort=0, ipcPort=42913, storageInfo=lv=-57;cid=testClusterID;nsid=2120962463;c=1731220385492) 2024-11-10T06:33:06,116 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x875e9fb525758dd5 with lease ID 0x8fe3745b1dd581d4: from storage DS-1f019b69-2467-48b0-acc8-b6371b1f86fb node DatanodeRegistration(127.0.0.1:45029, datanodeUuid=ab94b412-564b-4790-bf2f-58b43a9ddb36, infoPort=32853, infoSecurePort=0, ipcPort=42913, storageInfo=lv=-57;cid=testClusterID;nsid=2120962463;c=1731220385492), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:33:06,116 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x875e9fb525758dd5 with lease ID 0x8fe3745b1dd581d4: Processing first storage report for DS-dcec0171-b621-41b3-968a-5aef51bbcb6b from datanode DatanodeRegistration(127.0.0.1:45029, datanodeUuid=ab94b412-564b-4790-bf2f-58b43a9ddb36, infoPort=32853, infoSecurePort=0, ipcPort=42913, storageInfo=lv=-57;cid=testClusterID;nsid=2120962463;c=1731220385492) 2024-11-10T06:33:06,116 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x875e9fb525758dd5 with lease ID 0x8fe3745b1dd581d4: from storage DS-dcec0171-b621-41b3-968a-5aef51bbcb6b node DatanodeRegistration(127.0.0.1:45029, datanodeUuid=ab94b412-564b-4790-bf2f-58b43a9ddb36, infoPort=32853, infoSecurePort=0, ipcPort=42913, storageInfo=lv=-57;cid=testClusterID;nsid=2120962463;c=1731220385492), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:33:06,132 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350 2024-11-10T06:33:06,135 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/cluster_ef24c530-3e5a-da26-5472-37cd2ee79f54/zookeeper_0, clientPort=55342, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/cluster_ef24c530-3e5a-da26-5472-37cd2ee79f54/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/cluster_ef24c530-3e5a-da26-5472-37cd2ee79f54/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T06:33:06,136 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55342 2024-11-10T06:33:06,136 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:33:06,137 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:33:06,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741825_1001 (size=7) 2024-11-10T06:33:06,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741825_1001 (size=7) 2024-11-10T06:33:06,147 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b with version=8 2024-11-10T06:33:06,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/hbase-staging 2024-11-10T06:33:06,149 INFO [Time-limited test {}] client.ConnectionUtils(128): master/4999977c7e1b:0 server-side Connection retries=45 2024-11-10T06:33:06,149 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:33:06,149 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T06:33:06,149 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T06:33:06,149 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:33:06,149 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T06:33:06,149 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T06:33:06,149 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T06:33:06,150 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34781 2024-11-10T06:33:06,151 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34781 connecting to ZooKeeper ensemble=127.0.0.1:55342 2024-11-10T06:33:06,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:347810x0, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T06:33:06,158 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34781-0x10190e226bf0000 connected 2024-11-10T06:33:06,172 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:33:06,173 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:33:06,175 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:33:06,175 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b, hbase.cluster.distributed=false 2024-11-10T06:33:06,176 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T06:33:06,179 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34781 2024-11-10T06:33:06,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34781 2024-11-10T06:33:06,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34781 2024-11-10T06:33:06,182 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34781 2024-11-10T06:33:06,182 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34781 2024-11-10T06:33:06,198 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4999977c7e1b:0 server-side Connection retries=45 2024-11-10T06:33:06,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:33:06,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T06:33:06,198 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T06:33:06,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:33:06,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T06:33:06,198 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T06:33:06,198 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T06:33:06,199 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36039 2024-11-10T06:33:06,200 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36039 connecting to ZooKeeper ensemble=127.0.0.1:55342 2024-11-10T06:33:06,200 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:33:06,202 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:33:06,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:360390x0, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T06:33:06,206 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:360390x0, quorum=127.0.0.1:55342, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:33:06,206 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36039-0x10190e226bf0001 connected 2024-11-10T06:33:06,207 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T06:33:06,207 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T06:33:06,208 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T06:33:06,209 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T06:33:06,209 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36039 2024-11-10T06:33:06,209 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36039 2024-11-10T06:33:06,209 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36039 2024-11-10T06:33:06,210 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36039 2024-11-10T06:33:06,210 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36039 2024-11-10T06:33:06,222 
DEBUG [M:0;4999977c7e1b:34781 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;4999977c7e1b:34781 2024-11-10T06:33:06,222 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/4999977c7e1b,34781,1731220386149 2024-11-10T06:33:06,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:33:06,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:33:06,224 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/4999977c7e1b,34781,1731220386149 2024-11-10T06:33:06,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T06:33:06,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:33:06,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:33:06,226 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T06:33:06,227 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/4999977c7e1b,34781,1731220386149 from backup master directory 2024-11-10T06:33:06,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/4999977c7e1b,34781,1731220386149 2024-11-10T06:33:06,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:33:06,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:33:06,228 WARN [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
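Editor's note: the ZKWatcher/ZKUtil entries above repeatedly "set watcher on znode that does not yet exist" (for example /hbase/running and /hbase/master) and then react to NodeCreated/NodeChildrenChanged events while the active master registers itself. A minimal sketch of that watch-before-create pattern with the plain Apache ZooKeeper client follows; the quorum address and znode paths are taken from the log, while the session timeout and latch handling are illustrative assumptions, not part of this test.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class WatchMasterZNode {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Quorum address as reported in the log; the 30s session timeout is an assumption.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55342", 30_000, event -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();

        // exists() registers the watch even when the znode is absent, which is what
        // the "Set watcher on znode that does not yet exist" lines amount to.
        Stat stat = zk.exists("/hbase/master", event ->
            // Fires once, e.g. with type=NodeCreated when the active master registers.
            System.out.println("event: " + event.getType() + " on " + event.getPath()));
        System.out.println("/hbase/master " + (stat == null ? "absent, watch armed" : "present"));

        Thread.sleep(5_000);   // give the watch a chance to fire in this toy example
        zk.close();
    }
}
```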
2024-11-10T06:33:06,228 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=4999977c7e1b,34781,1731220386149 2024-11-10T06:33:06,236 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/hbase.id] with ID: f3f0aea4-0b1a-4bfe-b55e-ac9e3f0f56e5 2024-11-10T06:33:06,236 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/.tmp/hbase.id 2024-11-10T06:33:06,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741826_1002 (size=42) 2024-11-10T06:33:06,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741826_1002 (size=42) 2024-11-10T06:33:06,242 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/.tmp/hbase.id]:[hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/hbase.id] 2024-11-10T06:33:06,252 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:33:06,252 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T06:33:06,253 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
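Editor's note: the FSUtils entries above write the cluster ID to a temporary file under .tmp and only then move it to its final hbase.id location, so readers never observe a half-written file. A sketch of that write-then-rename idiom with the Hadoop FileSystem API is below; the NameNode address and directory layout echo the log, but the file content handling is illustrative (the real code stores a serialized ClusterId, not a bare UUID string).

```java
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteClusterId {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:35403");   // NameNode address from the log
        FileSystem fs = FileSystem.get(conf);

        Path root = new Path("/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b");
        Path tmp  = new Path(root, ".tmp/hbase.id");
        Path dst  = new Path(root, "hbase.id");

        // 1. Write the ID to a temporary file first ...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
        }
        // 2. ... then publish it via rename, which HDFS performs atomically.
        if (!fs.rename(tmp, dst)) {
            throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
        System.out.println("cluster id written to " + dst);
    }
}
```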
2024-11-10T06:33:06,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:33:06,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:33:06,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741827_1003 (size=196) 2024-11-10T06:33:06,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741827_1003 (size=196) 2024-11-10T06:33:06,261 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T06:33:06,262 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T06:33:06,262 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:33:06,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741828_1004 (size=1189) 2024-11-10T06:33:06,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741828_1004 (size=1189) 2024-11-10T06:33:06,269 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store 2024-11-10T06:33:06,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741829_1005 (size=34) 2024-11-10T06:33:06,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741829_1005 (size=34) 2024-11-10T06:33:06,276 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:33:06,276 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T06:33:06,276 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:33:06,276 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:33:06,276 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T06:33:06,276 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:33:06,276 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
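Editor's note: the master:store descriptor dumped above spells out per-family settings (VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE). For comparison, this is roughly how a client declares similar families with the public admin API; the table name is made up for illustration, since master:store itself is an internal region that clients never create.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateStoreLikeTable {
    public static void main(String[] args) throws Exception {
        // 'info'-like family mirroring the attributes printed for master:store above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();
        // 'proc'-like family: single version, default encoding, 64 KB blocks.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();

        // Hypothetical user table name; not part of this test run.
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_store_like"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();

        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.createTable(desc);
        }
    }
}
```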
2024-11-10T06:33:06,276 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731220386276Disabling compacts and flushes for region at 1731220386276Disabling writes for close at 1731220386276Writing region close event to WAL at 1731220386276Closed at 1731220386276 2024-11-10T06:33:06,276 WARN [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/.initializing 2024-11-10T06:33:06,277 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/WALs/4999977c7e1b,34781,1731220386149 2024-11-10T06:33:06,279 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C34781%2C1731220386149, suffix=, logDir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/WALs/4999977c7e1b,34781,1731220386149, archiveDir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/oldWALs, maxLogs=10 2024-11-10T06:33:06,279 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C34781%2C1731220386149.1731220386279 2024-11-10T06:33:06,283 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/WALs/4999977c7e1b,34781,1731220386149/4999977c7e1b%2C34781%2C1731220386149.1731220386279 2024-11-10T06:33:06,285 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32853:32853),(127.0.0.1/127.0.0.1:34843:34843)] 2024-11-10T06:33:06,286 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:33:06,286 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:33:06,286 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:33:06,286 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:33:06,287 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:33:06,289 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T06:33:06,289 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:06,289 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:33:06,289 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:33:06,290 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T06:33:06,290 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:06,290 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:33:06,290 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:33:06,291 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T06:33:06,291 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:06,291 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:33:06,292 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:33:06,292 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T06:33:06,293 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:06,293 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:33:06,293 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:33:06,294 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:33:06,294 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:33:06,295 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:33:06,295 DEBUG [master/4999977c7e1b:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:33:06,295 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T06:33:06,296 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:33:06,298 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:33:06,298 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=717986, jitterRate=-0.08703470230102539}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T06:33:06,299 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731220386286Initializing all the Stores at 1731220386287 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220386287Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220386287Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220386287Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220386287Cleaning up temporary data from old regions at 1731220386295 (+8 ms)Region opened successfully at 1731220386299 (+4 ms) 2024-11-10T06:33:06,299 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T06:33:06,302 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@120daa1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4999977c7e1b/172.17.0.2:0 2024-11-10T06:33:06,303 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T06:33:06,303 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T06:33:06,303 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T06:33:06,303 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T06:33:06,303 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-10T06:33:06,304 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-10T06:33:06,304 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T06:33:06,307 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T06:33:06,307 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T06:33:06,309 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T06:33:06,310 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T06:33:06,310 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T06:33:06,312 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T06:33:06,312 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T06:33:06,313 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T06:33:06,314 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T06:33:06,315 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T06:33:06,317 DEBUG 
[master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T06:33:06,318 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T06:33:06,320 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T06:33:06,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T06:33:06,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T06:33:06,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:33:06,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:33:06,322 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=4999977c7e1b,34781,1731220386149, sessionid=0x10190e226bf0000, setting cluster-up flag (Was=false) 2024-11-10T06:33:06,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:33:06,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:33:06,331 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T06:33:06,332 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4999977c7e1b,34781,1731220386149 2024-11-10T06:33:06,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:33:06,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:33:06,340 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T06:33:06,341 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4999977c7e1b,34781,1731220386149 2024-11-10T06:33:06,342 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T06:33:06,343 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T06:33:06,343 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T06:33:06,344 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-10T06:33:06,344 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 4999977c7e1b,34781,1731220386149 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T06:33:06,345 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:33:06,345 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:33:06,345 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:33:06,345 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:33:06,345 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/4999977c7e1b:0, corePoolSize=10, maxPoolSize=10 2024-11-10T06:33:06,345 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:33:06,345 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/4999977c7e1b:0, corePoolSize=2, maxPoolSize=2 2024-11-10T06:33:06,345 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/4999977c7e1b:0, corePoolSize=1, 
maxPoolSize=1 2024-11-10T06:33:06,347 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:33:06,347 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T06:33:06,347 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731220416347 2024-11-10T06:33:06,347 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T06:33:06,347 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T06:33:06,347 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T06:33:06,347 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T06:33:06,347 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T06:33:06,347 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T06:33:06,348 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:06,348 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
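Editor's note: the cleaner entries above register several chores on the master's ChoreService (LogsCleaner with a 600000 ms period, plus the HFile and procedure-WAL cleaners). ChoreService and ScheduledChore are HBase-internal classes, so purely as an analogy, the sketch below schedules a periodic "cleaner" with a plain ScheduledExecutorService; the 10-minute period is the one printed in the log, and the task body is invented.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ToyLogCleaner {
    public static void main(String[] args) throws Exception {
        ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor(r -> {
            Thread t = new Thread(r, "toy-LogsCleaner");
            t.setDaemon(true);
            return t;
        });

        long periodMs = 600_000L;  // matches "period=600000, unit=MILLISECONDS" in the log
        chores.scheduleAtFixedRate(() -> {
            // Placeholder work: a real cleaner chore lists oldWALs/archived HFiles and
            // deletes only the files that every configured cleaner delegate allows.
            System.out.println("cleaner pass at " + System.currentTimeMillis());
        }, periodMs, periodMs, TimeUnit.MILLISECONDS);

        Thread.sleep(5_000);       // keep the toy process alive briefly
        chores.shutdownNow();
    }
}
```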
2024-11-10T06:33:06,348 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T06:33:06,348 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T06:33:06,348 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T06:33:06,348 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T06:33:06,348 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T06:33:06,348 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T06:33:06,349 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220386348,5,FailOnTimeoutGroup] 2024-11-10T06:33:06,349 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220386349,5,FailOnTimeoutGroup] 2024-11-10T06:33:06,349 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,350 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
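Editor's note: the entry above creates the hbase:meta descriptor with its info/ns/rep_barrier/table families. Once the cluster is serving, a client can read region locations straight out of that catalog table; a minimal scan of the 'info' family is sketched below with the standard client API. The ZooKeeper address comes from this log, everything else is generic.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanMetaInfo {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ZooKeeper quorum of this mini-cluster; a real deployment would differ.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "55342");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
            // The 'info' family carries the regioninfo/server/seqnum columns for every region.
            Scan scan = new Scan().addFamily(Bytes.toBytes("info"));
            try (ResultScanner scanner = meta.getScanner(scan)) {
                for (Result row : scanner) {
                    System.out.println(Bytes.toStringBinary(row.getRow()));
                }
            }
        }
    }
}
```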
2024-11-10T06:33:06,350 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,350 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741831_1007 (size=1321) 2024-11-10T06:33:06,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741831_1007 (size=1321) 2024-11-10T06:33:06,355 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T06:33:06,355 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b 2024-11-10T06:33:06,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741832_1008 (size=32) 2024-11-10T06:33:06,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741832_1008 (size=32) 2024-11-10T06:33:06,363 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:33:06,368 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T06:33:06,369 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T06:33:06,369 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:06,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:33:06,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T06:33:06,370 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T06:33:06,370 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:06,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:33:06,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T06:33:06,372 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; 
major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T06:33:06,372 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:06,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:33:06,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T06:33:06,373 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T06:33:06,373 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:06,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:33:06,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T06:33:06,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740 2024-11-10T06:33:06,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740 2024-11-10T06:33:06,376 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T06:33:06,376 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T06:33:06,376 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
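Editor's note: the CompactionConfiguration lines repeated above for each column family all resolve to the same numbers: minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2 (5.0 off-peak), throttle point 2684354560. The sketch below shows the usual hbase-site.xml keys behind those values; the key names are the standard ones from the HBase reference guide, and the derivation of the throttle point is an assumption that happens to be consistent with the logged value.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Values mirroring the CompactionConfiguration lines in the log.
        conf.setInt("hbase.hstore.compaction.min", 3);            // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);           // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);

        // The logged throttle point (2684354560) is consistent with
        // 2 * maxFilesToCompact * memstore flush size = 2 * 10 * 128 MB (assumed default rule).
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long throttle = 2L * conf.getInt("hbase.hstore.compaction.max", 10) * flushSize;
        System.out.println("throttle point = " + throttle);        // prints 2684354560

        // The FlushLargeStoresPolicy fallback seen just above works similarly: with no
        // hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor,
        // it divides the region's memstore flush heap size by the number of families.
    }
}
```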
2024-11-10T06:33:06,377 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T06:33:06,379 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:33:06,379 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862995, jitterRate=0.09735557436943054}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T06:33:06,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731220386363Initializing all the Stores at 1731220386364 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220386364Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220386367 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220386368 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220386368Cleaning up temporary data from old regions at 1731220386376 (+8 ms)Region opened successfully at 1731220386380 (+4 ms) 2024-11-10T06:33:06,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T06:33:06,380 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T06:33:06,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T06:33:06,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T06:33:06,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T06:33:06,381 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T06:33:06,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731220386380Disabling compacts and flushes for region at 1731220386380Disabling writes for close at 
1731220386380Writing region close event to WAL at 1731220386381 (+1 ms)Closed at 1731220386381 2024-11-10T06:33:06,382 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:33:06,382 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T06:33:06,382 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T06:33:06,384 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T06:33:06,385 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T06:33:06,412 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(746): ClusterId : f3f0aea4-0b1a-4bfe-b55e-ac9e3f0f56e5 2024-11-10T06:33:06,412 DEBUG [RS:0;4999977c7e1b:36039 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T06:33:06,414 DEBUG [RS:0;4999977c7e1b:36039 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T06:33:06,414 DEBUG [RS:0;4999977c7e1b:36039 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T06:33:06,416 DEBUG [RS:0;4999977c7e1b:36039 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T06:33:06,417 DEBUG [RS:0;4999977c7e1b:36039 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ab1455a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4999977c7e1b/172.17.0.2:0 2024-11-10T06:33:06,429 DEBUG [RS:0;4999977c7e1b:36039 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;4999977c7e1b:36039 2024-11-10T06:33:06,429 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T06:33:06,429 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T06:33:06,429 DEBUG [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(832): About to register with Master. 
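Editor's note: both the master and the region server above install a shutdown hook (regionserver.ShutdownHook "Installed shutdown hook thread") so that an abrupt JVM exit still closes regions and the underlying filesystem in order. The mechanism itself is plain Java; a stripped-down illustration, not the HBase implementation:

```java
public class ShutdownHookDemo {
    public static void main(String[] args) throws Exception {
        Thread hook = new Thread(() -> {
            // In HBase this is where the server is stopped and the FileSystem is
            // closed before the JVM goes away.
            System.out.println("shutdown hook: releasing resources");
        }, "Shutdownhook:demo");
        Runtime.getRuntime().addShutdownHook(hook);

        System.out.println("running; Ctrl-C or a normal exit triggers the hook");
        Thread.sleep(2_000);
        // On return from main the JVM exits and the hook runs.
    }
}
```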
2024-11-10T06:33:06,429 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(2659): reportForDuty to master=4999977c7e1b,34781,1731220386149 with port=36039, startcode=1731220386197
2024-11-10T06:33:06,430 DEBUG [RS:0;4999977c7e1b:36039 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-10T06:33:06,432 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55225, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService
2024-11-10T06:33:06,432 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34781 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4999977c7e1b,36039,1731220386197
2024-11-10T06:33:06,432 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34781 {}] master.ServerManager(517): Registering regionserver=4999977c7e1b,36039,1731220386197
2024-11-10T06:33:06,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:33:06,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:33:06,434 DEBUG [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b
2024-11-10T06:33:06,434 DEBUG [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35403
2024-11-10T06:33:06,434 DEBUG [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-10T06:33:06,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-10T06:33:06,437 DEBUG [RS:0;4999977c7e1b:36039 {}] zookeeper.ZKUtil(111): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4999977c7e1b,36039,1731220386197
2024-11-10T06:33:06,437 WARN [RS:0;4999977c7e1b:36039 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-10T06:33:06,437 INFO [RS:0;4999977c7e1b:36039 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:33:06,437 DEBUG [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/WALs/4999977c7e1b,36039,1731220386197 2024-11-10T06:33:06,437 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4999977c7e1b,36039,1731220386197] 2024-11-10T06:33:06,441 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T06:33:06,443 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T06:33:06,443 INFO [RS:0;4999977c7e1b:36039 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T06:33:06,443 INFO [RS:0;4999977c7e1b:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,443 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T06:33:06,444 INFO [RS:0;4999977c7e1b:36039 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T06:33:06,444 INFO [RS:0;4999977c7e1b:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,444 DEBUG [RS:0;4999977c7e1b:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:33:06,444 DEBUG [RS:0;4999977c7e1b:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:33:06,444 DEBUG [RS:0;4999977c7e1b:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:33:06,445 DEBUG [RS:0;4999977c7e1b:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:33:06,445 DEBUG [RS:0;4999977c7e1b:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:33:06,445 DEBUG [RS:0;4999977c7e1b:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4999977c7e1b:0, corePoolSize=2, maxPoolSize=2 2024-11-10T06:33:06,445 DEBUG [RS:0;4999977c7e1b:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:33:06,445 DEBUG [RS:0;4999977c7e1b:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:33:06,445 DEBUG [RS:0;4999977c7e1b:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4999977c7e1b:0, corePoolSize=1, 
maxPoolSize=1 2024-11-10T06:33:06,445 DEBUG [RS:0;4999977c7e1b:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:33:06,445 DEBUG [RS:0;4999977c7e1b:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:33:06,445 DEBUG [RS:0;4999977c7e1b:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:33:06,445 DEBUG [RS:0;4999977c7e1b:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:33:06,445 DEBUG [RS:0;4999977c7e1b:36039 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:33:06,445 INFO [RS:0;4999977c7e1b:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,445 INFO [RS:0;4999977c7e1b:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,445 INFO [RS:0;4999977c7e1b:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,445 INFO [RS:0;4999977c7e1b:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,445 INFO [RS:0;4999977c7e1b:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,445 INFO [RS:0;4999977c7e1b:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,36039,1731220386197-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T06:33:06,463 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T06:33:06,463 INFO [RS:0;4999977c7e1b:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,36039,1731220386197-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,463 INFO [RS:0;4999977c7e1b:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,463 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.Replication(171): 4999977c7e1b,36039,1731220386197 started 2024-11-10T06:33:06,477 INFO [RS:0;4999977c7e1b:36039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T06:33:06,477 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(1482): Serving as 4999977c7e1b,36039,1731220386197, RpcServer on 4999977c7e1b/172.17.0.2:36039, sessionid=0x10190e226bf0001 2024-11-10T06:33:06,478 DEBUG [RS:0;4999977c7e1b:36039 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T06:33:06,478 DEBUG [RS:0;4999977c7e1b:36039 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4999977c7e1b,36039,1731220386197 2024-11-10T06:33:06,478 DEBUG [RS:0;4999977c7e1b:36039 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,36039,1731220386197' 2024-11-10T06:33:06,478 DEBUG [RS:0;4999977c7e1b:36039 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T06:33:06,478 DEBUG [RS:0;4999977c7e1b:36039 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T06:33:06,479 DEBUG [RS:0;4999977c7e1b:36039 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T06:33:06,479 DEBUG [RS:0;4999977c7e1b:36039 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T06:33:06,479 DEBUG [RS:0;4999977c7e1b:36039 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4999977c7e1b,36039,1731220386197 2024-11-10T06:33:06,479 DEBUG [RS:0;4999977c7e1b:36039 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,36039,1731220386197' 2024-11-10T06:33:06,479 DEBUG [RS:0;4999977c7e1b:36039 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T06:33:06,479 DEBUG [RS:0;4999977c7e1b:36039 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T06:33:06,479 DEBUG [RS:0;4999977c7e1b:36039 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T06:33:06,479 INFO [RS:0;4999977c7e1b:36039 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T06:33:06,479 INFO [RS:0;4999977c7e1b:36039 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T06:33:06,535 WARN [4999977c7e1b:34781 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-10T06:33:06,581 INFO [RS:0;4999977c7e1b:36039 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C36039%2C1731220386197, suffix=, logDir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/WALs/4999977c7e1b,36039,1731220386197, archiveDir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/oldWALs, maxLogs=32 2024-11-10T06:33:06,582 INFO [RS:0;4999977c7e1b:36039 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C36039%2C1731220386197.1731220386582 2024-11-10T06:33:06,588 INFO [RS:0;4999977c7e1b:36039 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/WALs/4999977c7e1b,36039,1731220386197/4999977c7e1b%2C36039%2C1731220386197.1731220386582 2024-11-10T06:33:06,592 DEBUG [RS:0;4999977c7e1b:36039 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32853:32853),(127.0.0.1/127.0.0.1:34843:34843)] 2024-11-10T06:33:06,786 DEBUG [4999977c7e1b:34781 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-10T06:33:06,786 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=4999977c7e1b,36039,1731220386197 2024-11-10T06:33:06,787 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4999977c7e1b,36039,1731220386197, state=OPENING 2024-11-10T06:33:06,790 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T06:33:06,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:33:06,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:33:06,792 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T06:33:06,792 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:33:06,792 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:33:06,792 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=4999977c7e1b,36039,1731220386197}] 2024-11-10T06:33:06,945 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T06:33:06,947 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50057, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T06:33:06,950 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T06:33:06,950 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:33:06,952 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C36039%2C1731220386197.meta, suffix=.meta, logDir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/WALs/4999977c7e1b,36039,1731220386197, archiveDir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/oldWALs, maxLogs=32 2024-11-10T06:33:06,952 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C36039%2C1731220386197.meta.1731220386952.meta 2024-11-10T06:33:06,957 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/WALs/4999977c7e1b,36039,1731220386197/4999977c7e1b%2C36039%2C1731220386197.meta.1731220386952.meta 2024-11-10T06:33:06,957 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32853:32853),(127.0.0.1/127.0.0.1:34843:34843)] 2024-11-10T06:33:06,958 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:33:06,958 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T06:33:06,959 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T06:33:06,959 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-10T06:33:06,959 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T06:33:06,959 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:33:06,959 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T06:33:06,959 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T06:33:06,960 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T06:33:06,961 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T06:33:06,961 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:06,961 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:33:06,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T06:33:06,962 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T06:33:06,962 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:06,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:33:06,963 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T06:33:06,963 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T06:33:06,963 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:06,963 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:33:06,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T06:33:06,964 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T06:33:06,964 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:06,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-10T06:33:06,965 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T06:33:06,965 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740 2024-11-10T06:33:06,966 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740 2024-11-10T06:33:06,967 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T06:33:06,967 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T06:33:06,968 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T06:33:06,969 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T06:33:06,969 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=879887, jitterRate=0.11883486807346344}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T06:33:06,969 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T06:33:06,970 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731220386959Writing region info on filesystem at 1731220386959Initializing all the Stores at 1731220386960 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220386960Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220386960Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220386960Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220386960Cleaning up temporary data from old regions at 1731220386967 (+7 ms)Running coprocessor post-open hooks at 1731220386969 (+2 ms)Region opened successfully at 1731220386970 (+1 ms) 2024-11-10T06:33:06,971 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731220386944 2024-11-10T06:33:06,973 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T06:33:06,973 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T06:33:06,974 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=4999977c7e1b,36039,1731220386197 2024-11-10T06:33:06,975 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4999977c7e1b,36039,1731220386197, state=OPEN 2024-11-10T06:33:06,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T06:33:06,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T06:33:06,979 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=4999977c7e1b,36039,1731220386197 2024-11-10T06:33:06,979 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:33:06,979 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:33:06,982 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T06:33:06,982 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=4999977c7e1b,36039,1731220386197 in 187 msec 2024-11-10T06:33:06,984 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T06:33:06,984 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 600 msec 2024-11-10T06:33:06,985 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:33:06,985 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T06:33:06,986 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T06:33:06,986 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4999977c7e1b,36039,1731220386197, seqNum=-1] 2024-11-10T06:33:06,987 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T06:33:06,988 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37923, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T06:33:06,992 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 649 msec 2024-11-10T06:33:06,993 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731220386993, completionTime=-1 2024-11-10T06:33:06,993 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-10T06:33:06,993 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-10T06:33:06,994 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-10T06:33:06,994 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731220446994 2024-11-10T06:33:06,995 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731220506994 2024-11-10T06:33:06,995 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-10T06:33:06,995 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,34781,1731220386149-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,995 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,34781,1731220386149-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,995 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,34781,1731220386149-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,995 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-4999977c7e1b:34781, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T06:33:06,995 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,995 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T06:33:06,997 DEBUG [master/4999977c7e1b:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T06:33:06,998 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.770sec 2024-11-10T06:33:06,999 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T06:33:06,999 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T06:33:06,999 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T06:33:06,999 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-10T06:33:06,999 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T06:33:06,999 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,34781,1731220386149-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T06:33:06,999 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,34781,1731220386149-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T06:33:07,001 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T06:33:07,001 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T06:33:07,001 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,34781,1731220386149-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T06:33:07,012 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73b2b928, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:33:07,012 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 4999977c7e1b,34781,-1 for getting cluster id 2024-11-10T06:33:07,012 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T06:33:07,013 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f3f0aea4-0b1a-4bfe-b55e-ac9e3f0f56e5' 2024-11-10T06:33:07,013 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T06:33:07,014 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f3f0aea4-0b1a-4bfe-b55e-ac9e3f0f56e5" 2024-11-10T06:33:07,014 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ace19d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:33:07,014 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4999977c7e1b,34781,-1] 2024-11-10T06:33:07,014 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T06:33:07,014 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:33:07,015 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36186, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T06:33:07,016 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@405cef44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:33:07,016 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T06:33:07,017 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4999977c7e1b,36039,1731220386197, seqNum=-1] 2024-11-10T06:33:07,017 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T06:33:07,018 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34170, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T06:33:07,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=4999977c7e1b,34781,1731220386149 2024-11-10T06:33:07,019 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:33:07,022 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-10T06:33:07,022 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T06:33:07,023 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 4999977c7e1b,34781,1731220386149 2024-11-10T06:33:07,023 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@42021c82 2024-11-10T06:33:07,023 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T06:33:07,024 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36192, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T06:33:07,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34781 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-10T06:33:07,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34781 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-10T06:33:07,025 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34781 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T06:33:07,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34781 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-10T06:33:07,027 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T06:33:07,027 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:07,027 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34781 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-10T06:33:07,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34781 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T06:33:07,028 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-10T06:33:07,035 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741835_1011 (size=381) 2024-11-10T06:33:07,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741835_1011 (size=381) 2024-11-10T06:33:07,037 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 25b555b60cc2797a1050b91aa2df6546, NAME => 'TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b 2024-11-10T06:33:07,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741836_1012 (size=64) 2024-11-10T06:33:07,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741836_1012 (size=64) 2024-11-10T06:33:07,042 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:33:07,043 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 25b555b60cc2797a1050b91aa2df6546, disabling compactions & flushes 2024-11-10T06:33:07,043 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 2024-11-10T06:33:07,043 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 2024-11-10T06:33:07,043 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. after waiting 0 ms 2024-11-10T06:33:07,043 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 2024-11-10T06:33:07,043 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 
2024-11-10T06:33:07,043 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 25b555b60cc2797a1050b91aa2df6546: Waiting for close lock at 1731220387043Disabling compacts and flushes for region at 1731220387043Disabling writes for close at 1731220387043Writing region close event to WAL at 1731220387043Closed at 1731220387043 2024-11-10T06:33:07,044 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T06:33:07,044 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731220387044"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731220387044"}]},"ts":"1731220387044"} 2024-11-10T06:33:07,046 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-10T06:33:07,047 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T06:33:07,047 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731220387047"}]},"ts":"1731220387047"} 2024-11-10T06:33:07,049 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-10T06:33:07,049 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=25b555b60cc2797a1050b91aa2df6546, ASSIGN}] 2024-11-10T06:33:07,050 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=25b555b60cc2797a1050b91aa2df6546, ASSIGN 2024-11-10T06:33:07,051 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=25b555b60cc2797a1050b91aa2df6546, ASSIGN; state=OFFLINE, location=4999977c7e1b,36039,1731220386197; forceNewPlan=false, retain=false 2024-11-10T06:33:07,202 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=25b555b60cc2797a1050b91aa2df6546, regionState=OPENING, regionLocation=4999977c7e1b,36039,1731220386197 2024-11-10T06:33:07,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=25b555b60cc2797a1050b91aa2df6546, ASSIGN because future has completed 2024-11-10T06:33:07,205 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 25b555b60cc2797a1050b91aa2df6546, 
server=4999977c7e1b,36039,1731220386197}] 2024-11-10T06:33:07,362 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 2024-11-10T06:33:07,362 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 25b555b60cc2797a1050b91aa2df6546, NAME => 'TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546.', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:33:07,362 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:07,362 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:33:07,362 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:07,362 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:07,364 INFO [StoreOpener-25b555b60cc2797a1050b91aa2df6546-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:07,365 INFO [StoreOpener-25b555b60cc2797a1050b91aa2df6546-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 25b555b60cc2797a1050b91aa2df6546 columnFamilyName info 2024-11-10T06:33:07,365 DEBUG [StoreOpener-25b555b60cc2797a1050b91aa2df6546-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:07,366 INFO [StoreOpener-25b555b60cc2797a1050b91aa2df6546-1 {}] regionserver.HStore(327): Store=25b555b60cc2797a1050b91aa2df6546/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:33:07,366 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:07,367 DEBUG 
[RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:07,367 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:07,367 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:07,367 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:07,369 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:07,371 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:33:07,371 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 25b555b60cc2797a1050b91aa2df6546; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757074, jitterRate=-0.03733091056346893}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T06:33:07,371 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:07,372 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 25b555b60cc2797a1050b91aa2df6546: Running coprocessor pre-open hook at 1731220387363Writing region info on filesystem at 1731220387363Initializing all the Stores at 1731220387363Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220387363Cleaning up temporary data from old regions at 1731220387367 (+4 ms)Running coprocessor post-open hooks at 1731220387371 (+4 ms)Region opened successfully at 1731220387372 (+1 ms) 2024-11-10T06:33:07,373 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546., pid=6, masterSystemTime=1731220387357 2024-11-10T06:33:07,375 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 2024-11-10T06:33:07,375 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 2024-11-10T06:33:07,376 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=25b555b60cc2797a1050b91aa2df6546, regionState=OPEN, openSeqNum=2, regionLocation=4999977c7e1b,36039,1731220386197 2024-11-10T06:33:07,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 25b555b60cc2797a1050b91aa2df6546, server=4999977c7e1b,36039,1731220386197 because future has completed 2024-11-10T06:33:07,381 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T06:33:07,381 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 25b555b60cc2797a1050b91aa2df6546, server=4999977c7e1b,36039,1731220386197 in 174 msec 2024-11-10T06:33:07,383 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T06:33:07,384 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=25b555b60cc2797a1050b91aa2df6546, ASSIGN in 332 msec 2024-11-10T06:33:07,384 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T06:33:07,385 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731220387384"}]},"ts":"1731220387384"} 2024-11-10T06:33:07,387 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-10T06:33:07,387 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T06:33:07,389 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 362 msec 2024-11-10T06:33:07,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:07,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:08,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:08,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:09,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:09,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:09,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:09,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:09,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:09,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:09,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:09,996 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:09,996 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:09,996 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,014 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:10,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:10,518 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T06:33:10,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,521 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:10,544 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:11,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:33:11,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:12,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:12,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:33:12,441 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-10T06:33:12,441 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-10T06:33:13,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:13,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:14,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:14,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:15,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:15,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:15,498 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-10T06:33:15,498 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-10T06:33:15,499 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T06:33:15,499 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-10T06:33:15,499 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-10T06:33:15,499 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-10T06:33:15,500 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-10T06:33:15,500 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-10T06:33:16,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:16,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:33:17,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34781 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T06:33:17,116 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-10T06:33:17,116 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-10T06:33:17,119 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-10T06:33:17,119 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 2024-11-10T06:33:17,121 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546., hostname=4999977c7e1b,36039,1731220386197, seqNum=2] 2024-11-10T06:33:17,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:17,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 25b555b60cc2797a1050b91aa2df6546 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T06:33:17,151 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/5a049c43d6844959b07f2ce3337b4135 is 1080, key is row0001/info:/1731220397122/Put/seqid=0 2024-11-10T06:33:17,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741837_1013 (size=12509) 2024-11-10T06:33:17,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741837_1013 (size=12509) 2024-11-10T06:33:17,158 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/5a049c43d6844959b07f2ce3337b4135 2024-11-10T06:33:17,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/5a049c43d6844959b07f2ce3337b4135 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/5a049c43d6844959b07f2ce3337b4135 2024-11-10T06:33:17,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=25b555b60cc2797a1050b91aa2df6546, server=4999977c7e1b,36039,1731220386197 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-10T06:33:17,170 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/5a049c43d6844959b07f2ce3337b4135, entries=7, sequenceid=11, filesize=12.2 K 2024-11-10T06:33:17,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 25b555b60cc2797a1050b91aa2df6546 in 38ms, sequenceid=11, compaction requested=false 2024-11-10T06:33:17,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 25b555b60cc2797a1050b91aa2df6546: 2024-11-10T06:33:17,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34170 deadline: 1731220407168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=25b555b60cc2797a1050b91aa2df6546, server=4999977c7e1b,36039,1731220386197 2024-11-10T06:33:17,193 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546., hostname=4999977c7e1b,36039,1731220386197, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546., hostname=4999977c7e1b,36039,1731220386197, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=25b555b60cc2797a1050b91aa2df6546, server=4999977c7e1b,36039,1731220386197 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-10T06:33:17,194 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546., hostname=4999977c7e1b,36039,1731220386197, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=25b555b60cc2797a1050b91aa2df6546, server=4999977c7e1b,36039,1731220386197 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-10T06:33:17,194 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546., hostname=4999977c7e1b,36039,1731220386197, seqNum=2 because the exception is null or not the one we care about 2024-11-10T06:33:17,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:33:27,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:27,257 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 25b555b60cc2797a1050b91aa2df6546 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-10T06:33:27,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/3075e107deb84ef0b76e4cbabc2c5c1f is 1080, key is row0008/info:/1731220397134/Put/seqid=0 2024-11-10T06:33:27,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741838_1014 (size=29761) 2024-11-10T06:33:27,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741838_1014 (size=29761) 2024-11-10T06:33:27,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/3075e107deb84ef0b76e4cbabc2c5c1f 2024-11-10T06:33:27,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/3075e107deb84ef0b76e4cbabc2c5c1f as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/3075e107deb84ef0b76e4cbabc2c5c1f 2024-11-10T06:33:27,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/3075e107deb84ef0b76e4cbabc2c5c1f, entries=23, sequenceid=37, filesize=29.1 K 2024-11-10T06:33:27,279 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 25b555b60cc2797a1050b91aa2df6546 in 22ms, sequenceid=37, compaction requested=false 2024-11-10T06:33:27,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 25b555b60cc2797a1050b91aa2df6546: 2024-11-10T06:33:27,279 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-10T06:33:27,279 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:33:27,279 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/3075e107deb84ef0b76e4cbabc2c5c1f because midkey is the same as first or last row 2024-11-10T06:33:27,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
    ...
Caused by: java.io.IOException: Filesystem closed
    ... 11 more
2024-11-10T06:33:27,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
    ...
Caused by: java.io.IOException: Filesystem closed
    ... 11 more
2024-11-10T06:33:28,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
    ...
Caused by: java.io.IOException: Filesystem closed
    ... 11 more
2024-11-10T06:33:28,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
    ...
Caused by: java.io.IOException: Filesystem closed
    ...
11 more 2024-11-10T06:33:29,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:29,270 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 25b555b60cc2797a1050b91aa2df6546 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T06:33:29,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/2ff58c985f30430693c13f4cc5a15311 is 1080, key is row0031/info:/1731220407258/Put/seqid=0 2024-11-10T06:33:29,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741839_1015 (size=12509) 2024-11-10T06:33:29,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741839_1015 (size=12509) 2024-11-10T06:33:29,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/2ff58c985f30430693c13f4cc5a15311 2024-11-10T06:33:29,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/2ff58c985f30430693c13f4cc5a15311 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/2ff58c985f30430693c13f4cc5a15311 2024-11-10T06:33:29,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/2ff58c985f30430693c13f4cc5a15311, entries=7, sequenceid=47, filesize=12.2 K 2024-11-10T06:33:29,292 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 25b555b60cc2797a1050b91aa2df6546 in 22ms, sequenceid=47, compaction requested=true 2024-11-10T06:33:29,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 25b555b60cc2797a1050b91aa2df6546: 2024-11-10T06:33:29,293 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-10T06:33:29,293 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:33:29,293 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/3075e107deb84ef0b76e4cbabc2c5c1f because midkey is the same as first or last row 2024-11-10T06:33:29,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 25b555b60cc2797a1050b91aa2df6546:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-10T06:33:29,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:29,293 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T06:33:29,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:29,293 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 25b555b60cc2797a1050b91aa2df6546 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-10T06:33:29,294 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T06:33:29,295 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1541): 25b555b60cc2797a1050b91aa2df6546/info is initiating minor compaction (all files) 2024-11-10T06:33:29,295 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 25b555b60cc2797a1050b91aa2df6546/info in TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 2024-11-10T06:33:29,295 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/5a049c43d6844959b07f2ce3337b4135, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/3075e107deb84ef0b76e4cbabc2c5c1f, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/2ff58c985f30430693c13f4cc5a15311] into tmpdir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp, totalSize=53.5 K 2024-11-10T06:33:29,295 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5a049c43d6844959b07f2ce3337b4135, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731220397122 2024-11-10T06:33:29,296 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3075e107deb84ef0b76e4cbabc2c5c1f, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731220397134 2024-11-10T06:33:29,296 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2ff58c985f30430693c13f4cc5a15311, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731220407258 2024-11-10T06:33:29,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/f262870a3ca045bba962b558d5002d0a is 1080, key is row0038/info:/1731220409271/Put/seqid=0 
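The split-policy lines above reduce to two checks: a size threshold (the three selected store files total 54779 bytes, logged as totalSize=53.5 K against sizeToCheck=16.0 K) and a midkey sanity check ("cannot split ... because midkey is the same as first or last row"). The Java sketch below is a minimal, hypothetical illustration of that decision flow; the class name, method names, and sample row keys are assumptions for illustration only and are not the HBase split-policy implementation.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class SplitCheckSketch {

    // "Should split because region size is big enough sumSize=..., sizeToCheck=..."
    static boolean sizeCheckPasses(long sumStoreFileSizeBytes, long sizeToCheckBytes) {
        return sumStoreFileSizeBytes > sizeToCheckBytes;
    }

    // "cannot split ... because midkey is the same as first or last row"
    static boolean midKeyIsUsable(byte[] midKey, byte[] firstKey, byte[] lastKey) {
        return !Arrays.equals(midKey, firstKey) && !Arrays.equals(midKey, lastKey);
    }

    public static void main(String[] args) {
        long sumSize = 54_779L;     // the three files selected above ("3 files of size 54779")
        long sizeToCheck = 16_384L; // 16.0 K, as in the sizeToCheck=16.0 K lines

        byte[] first = "row0001".getBytes(StandardCharsets.UTF_8);
        byte[] last = "row0060".getBytes(StandardCharsets.UTF_8);  // illustrative last row
        byte[] mid = "row0001".getBytes(StandardCharsets.UTF_8);   // midkey equals the first row

        System.out.println("size check passes: " + sizeCheckPasses(sumSize, sizeToCheck));
        System.out.println("midkey usable for split: " + midKeyIsUsable(mid, first, last));
    }
}

With these sample values the size check passes but the midkey check fails, which is the combination the log reports until a usable split point (splitKey=row0062 later in this section) becomes available.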
2024-11-10T06:33:29,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741840_1016 (size=20064) 2024-11-10T06:33:29,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741840_1016 (size=20064) 2024-11-10T06:33:29,306 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/f262870a3ca045bba962b558d5002d0a 2024-11-10T06:33:29,310 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 25b555b60cc2797a1050b91aa2df6546#info#compaction#58 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T06:33:29,311 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/438b823ef1944f43b2ae4863eae7a773 is 1080, key is row0001/info:/1731220397122/Put/seqid=0 2024-11-10T06:33:29,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/f262870a3ca045bba962b558d5002d0a as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/f262870a3ca045bba962b558d5002d0a 2024-11-10T06:33:29,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741841_1017 (size=44978) 2024-11-10T06:33:29,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741841_1017 (size=44978) 2024-11-10T06:33:29,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/f262870a3ca045bba962b558d5002d0a, entries=14, sequenceid=64, filesize=19.6 K 2024-11-10T06:33:29,319 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 25b555b60cc2797a1050b91aa2df6546 in 26ms, sequenceid=64, compaction requested=false 2024-11-10T06:33:29,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 25b555b60cc2797a1050b91aa2df6546: 2024-11-10T06:33:29,319 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.1 K, sizeToCheck=16.0 K 2024-11-10T06:33:29,319 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:33:29,319 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/3075e107deb84ef0b76e4cbabc2c5c1f because midkey is the same as first or last row 2024-11-10T06:33:29,322 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/438b823ef1944f43b2ae4863eae7a773 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/438b823ef1944f43b2ae4863eae7a773 2024-11-10T06:33:29,328 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 25b555b60cc2797a1050b91aa2df6546/info of 25b555b60cc2797a1050b91aa2df6546 into 438b823ef1944f43b2ae4863eae7a773(size=43.9 K), total size for store is 63.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T06:33:29,328 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 25b555b60cc2797a1050b91aa2df6546: 2024-11-10T06:33:29,328 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546., storeName=25b555b60cc2797a1050b91aa2df6546/info, priority=13, startTime=1731220409293; duration=0sec 2024-11-10T06:33:29,328 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-10T06:33:29,328 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:33:29,328 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/438b823ef1944f43b2ae4863eae7a773 because midkey is the same as first or last row 2024-11-10T06:33:29,328 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-10T06:33:29,328 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:33:29,328 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/438b823ef1944f43b2ae4863eae7a773 because midkey is the same as first or last row 2024-11-10T06:33:29,328 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-10T06:33:29,328 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:33:29,328 DEBUG 
[RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/438b823ef1944f43b2ae4863eae7a773 because midkey is the same as first or last row 2024-11-10T06:33:29,328 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:29,329 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 25b555b60cc2797a1050b91aa2df6546:info 2024-11-10T06:33:29,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:29,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
    ...
Caused by: java.io.IOException: Filesystem closed
    ... 11 more
2024-11-10T06:33:30,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
    ...
Caused by: java.io.IOException: Filesystem closed
    ... 11 more
2024-11-10T06:33:30,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
    ...
Caused by: java.io.IOException: Filesystem closed
    ...
11 more 2024-11-10T06:33:31,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 25b555b60cc2797a1050b91aa2df6546 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-10T06:33:31,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/c9b666c6b632464fbb6dbb76423a0657 is 1080, key is row0052/info:/1731220409295/Put/seqid=0 2024-11-10T06:33:31,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741842_1018 (size=20064) 2024-11-10T06:33:31,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741842_1018 (size=20064) 2024-11-10T06:33:31,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/c9b666c6b632464fbb6dbb76423a0657 2024-11-10T06:33:31,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/c9b666c6b632464fbb6dbb76423a0657 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/c9b666c6b632464fbb6dbb76423a0657 2024-11-10T06:33:31,343 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/c9b666c6b632464fbb6dbb76423a0657, entries=14, sequenceid=82, filesize=19.6 K 2024-11-10T06:33:31,344 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for 25b555b60cc2797a1050b91aa2df6546 in 23ms, sequenceid=82, compaction requested=true 2024-11-10T06:33:31,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 25b555b60cc2797a1050b91aa2df6546: 2024-11-10T06:33:31,344 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-10T06:33:31,344 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:33:31,345 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/438b823ef1944f43b2ae4863eae7a773 because midkey is the same as first or last row 2024-11-10T06:33:31,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 25b555b60cc2797a1050b91aa2df6546:info, priority=-2147483648, current under compaction 
store size is 1 2024-11-10T06:33:31,345 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:31,345 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T06:33:31,346 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T06:33:31,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,346 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1541): 25b555b60cc2797a1050b91aa2df6546/info is initiating minor compaction (all files) 2024-11-10T06:33:31,346 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 25b555b60cc2797a1050b91aa2df6546 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-10T06:33:31,346 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 25b555b60cc2797a1050b91aa2df6546/info in TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 2024-11-10T06:33:31,346 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/438b823ef1944f43b2ae4863eae7a773, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/f262870a3ca045bba962b558d5002d0a, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/c9b666c6b632464fbb6dbb76423a0657] into tmpdir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp, totalSize=83.1 K 2024-11-10T06:33:31,347 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 438b823ef1944f43b2ae4863eae7a773, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731220397122 2024-11-10T06:33:31,347 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting f262870a3ca045bba962b558d5002d0a, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1731220409271 2024-11-10T06:33:31,348 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting c9b666c6b632464fbb6dbb76423a0657, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731220409295 2024-11-10T06:33:31,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/df2d699882c74a909e13c778dc4d5c03 is 1080, key is row0066/info:/1731220411322/Put/seqid=0 
2024-11-10T06:33:31,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741843_1019 (size=20064) 2024-11-10T06:33:31,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741843_1019 (size=20064) 2024-11-10T06:33:31,357 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/df2d699882c74a909e13c778dc4d5c03 2024-11-10T06:33:31,362 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 25b555b60cc2797a1050b91aa2df6546#info#compaction#61 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T06:33:31,362 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/0467660b560a4eff9b4abcc27d74c6ad is 1080, key is row0001/info:/1731220397122/Put/seqid=0 2024-11-10T06:33:31,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/df2d699882c74a909e13c778dc4d5c03 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/df2d699882c74a909e13c778dc4d5c03 2024-11-10T06:33:31,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741844_1020 (size=75378) 2024-11-10T06:33:31,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741844_1020 (size=75378) 2024-11-10T06:33:31,372 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/df2d699882c74a909e13c778dc4d5c03, entries=14, sequenceid=99, filesize=19.6 K 2024-11-10T06:33:31,373 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=14.71 KB/15064 for 25b555b60cc2797a1050b91aa2df6546 in 27ms, sequenceid=99, compaction requested=false 2024-11-10T06:33:31,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 25b555b60cc2797a1050b91aa2df6546: 2024-11-10T06:33:31,373 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.7 K, sizeToCheck=16.0 K 2024-11-10T06:33:31,373 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:33:31,373 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/438b823ef1944f43b2ae4863eae7a773 because midkey is the same as first or last row 2024-11-10T06:33:31,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,374 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 25b555b60cc2797a1050b91aa2df6546 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-10T06:33:31,374 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/0467660b560a4eff9b4abcc27d74c6ad as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/0467660b560a4eff9b4abcc27d74c6ad 2024-11-10T06:33:31,378 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/c6dccb019abe4d5eaf1c036fd7599b3f is 1080, key is row0080/info:/1731220411347/Put/seqid=0 2024-11-10T06:33:31,382 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 25b555b60cc2797a1050b91aa2df6546/info of 25b555b60cc2797a1050b91aa2df6546 into 0467660b560a4eff9b4abcc27d74c6ad(size=73.6 K), total size for store is 93.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-10T06:33:31,382 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 25b555b60cc2797a1050b91aa2df6546: 2024-11-10T06:33:31,382 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546., storeName=25b555b60cc2797a1050b91aa2df6546/info, priority=13, startTime=1731220411345; duration=0sec 2024-11-10T06:33:31,382 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-10T06:33:31,382 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:33:31,383 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-10T06:33:31,383 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:33:31,383 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-10T06:33:31,383 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T06:33:31,384 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:31,384 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:31,384 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 25b555b60cc2797a1050b91aa2df6546:info 2024-11-10T06:33:31,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741845_1021 (size=21141) 2024-11-10T06:33:31,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741845_1021 (size=21141) 2024-11-10T06:33:31,385 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34781 {}] assignment.AssignmentManager(1363): Split request from 4999977c7e1b,36039,1731220386197, parent={ENCODED => 25b555b60cc2797a1050b91aa2df6546, NAME => 'TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-10T06:33:31,392 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34781 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=4999977c7e1b,36039,1731220386197 2024-11-10T06:33:31,396 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34781 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=25b555b60cc2797a1050b91aa2df6546, 
daughterA=d2063cd689984f6265f1135a900cf2f0, daughterB=130ef2252ee598b19b70e01950118900 2024-11-10T06:33:31,398 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=25b555b60cc2797a1050b91aa2df6546, daughterA=d2063cd689984f6265f1135a900cf2f0, daughterB=130ef2252ee598b19b70e01950118900 2024-11-10T06:33:31,398 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=25b555b60cc2797a1050b91aa2df6546, daughterA=d2063cd689984f6265f1135a900cf2f0, daughterB=130ef2252ee598b19b70e01950118900 2024-11-10T06:33:31,398 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=25b555b60cc2797a1050b91aa2df6546, daughterA=d2063cd689984f6265f1135a900cf2f0, daughterB=130ef2252ee598b19b70e01950118900 2024-11-10T06:33:31,405 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=25b555b60cc2797a1050b91aa2df6546, UNASSIGN}] 2024-11-10T06:33:31,406 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=25b555b60cc2797a1050b91aa2df6546, UNASSIGN 2024-11-10T06:33:31,407 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=25b555b60cc2797a1050b91aa2df6546, regionState=CLOSING, regionLocation=4999977c7e1b,36039,1731220386197 2024-11-10T06:33:31,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=25b555b60cc2797a1050b91aa2df6546, UNASSIGN because future has completed 2024-11-10T06:33:31,410 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-10T06:33:31,410 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 25b555b60cc2797a1050b91aa2df6546, server=4999977c7e1b,36039,1731220386197}] 2024-11-10T06:33:31,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    ...
Caused by: java.io.IOException: Filesystem closed
    ... 11 more
2024-11-10T06:33:31,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
    ...
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:31,568 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,568 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-10T06:33:31,569 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 25b555b60cc2797a1050b91aa2df6546, disabling compactions & flushes 2024-11-10T06:33:31,569 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1993): waiting for 0 compactions & cache flush to complete for region TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 2024-11-10T06:33:31,785 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/c6dccb019abe4d5eaf1c036fd7599b3f 2024-11-10T06:33:31,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/c6dccb019abe4d5eaf1c036fd7599b3f as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/c6dccb019abe4d5eaf1c036fd7599b3f 2024-11-10T06:33:31,796 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/c6dccb019abe4d5eaf1c036fd7599b3f, entries=15, sequenceid=117, filesize=20.6 K 2024-11-10T06:33:31,797 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=2.10 KB/2152 for 25b555b60cc2797a1050b91aa2df6546 in 423ms, sequenceid=117, compaction requested=true 2024-11-10T06:33:31,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 25b555b60cc2797a1050b91aa2df6546: 2024-11-10T06:33:31,798 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 2024-11-10T06:33:31,798 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 
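Context for the two RecoverLeaseFSUtils warnings above: judging by the differing port (40625 vs 35403) and server start timestamps, they appear to come from a leftover Close-WAL-Writer thread of an earlier mini-cluster whose DFSClient had already been shut down, hence the "Filesystem closed" IOException. The polling pattern that produces them looks roughly like the sketch below; this is illustrative only, not HBase's RecoverLeaseFSUtils code, and it assumes the plain DistributedFileSystem.recoverLease/isFileClosed calls named in the stack trace.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {
      // Poll recoverLease()/isFileClosed() until the NameNode reports the WAL closed.
      // If the DFSClient behind the FileSystem has already been shut down,
      // isFileClosed() throws "java.io.IOException: Filesystem closed",
      // which is what the WARN entries above record.
      public static void recover(String walUri, Configuration conf) throws Exception {
        Path wal = new Path(walUri);
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new URI(walUri), conf);
        boolean recovered = dfs.recoverLease(wal);      // ask the NameNode to start lease recovery
        while (!recovered && !dfs.isFileClosed(wal)) {  // poll until the file is reported closed
          Thread.sleep(1000L);                          // back off between attempts
          recovered = dfs.recoverLease(wal);
        }
      }
    }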
2024-11-10T06:33:31,798 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. after waiting 0 ms 2024-11-10T06:33:31,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 25b555b60cc2797a1050b91aa2df6546:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T06:33:31,798 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 2024-11-10T06:33:31,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:31,798 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. because compaction request was cancelled 2024-11-10T06:33:31,798 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 25b555b60cc2797a1050b91aa2df6546 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB 2024-11-10T06:33:31,798 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 25b555b60cc2797a1050b91aa2df6546:info 2024-11-10T06:33:31,801 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/14f99e4d34bd487baa354975eaf6780e is 1080, key is row0095/info:/1731220411375/Put/seqid=0 2024-11-10T06:33:31,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741846_1022 (size=7112) 2024-11-10T06:33:31,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741846_1022 (size=7112) 2024-11-10T06:33:31,806 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/14f99e4d34bd487baa354975eaf6780e 2024-11-10T06:33:31,811 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/.tmp/info/14f99e4d34bd487baa354975eaf6780e as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/14f99e4d34bd487baa354975eaf6780e 2024-11-10T06:33:31,816 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/14f99e4d34bd487baa354975eaf6780e, entries=2, sequenceid=123, filesize=6.9 K 2024-11-10T06:33:31,817 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 25b555b60cc2797a1050b91aa2df6546 in 19ms, sequenceid=123, compaction requested=true 2024-11-10T06:33:31,818 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/5a049c43d6844959b07f2ce3337b4135, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/3075e107deb84ef0b76e4cbabc2c5c1f, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/438b823ef1944f43b2ae4863eae7a773, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/2ff58c985f30430693c13f4cc5a15311, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/f262870a3ca045bba962b558d5002d0a, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/c9b666c6b632464fbb6dbb76423a0657] to archive 2024-11-10T06:33:31,818 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
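The per-file HFileArchiver moves that follow use a path convention that is visible directly in the log: the store file's path relative to the cluster root directory is re-rooted under <root>/archive. A minimal sketch of that mapping is below; toArchivePath is a hypothetical helper written for illustration, not the HFileArchiver API.

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
      // Mirrors the mapping seen in the log:
      //   <root>/data/<ns>/<table>/<region>/<cf>/<hfile>
      //   -> <root>/archive/data/<ns>/<table>/<region>/<cf>/<hfile>
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String relative = storeFile.toUri().getPath()
            .substring(rootDir.toUri().getPath().length() + 1);   // e.g. "data/default/..."
        return new Path(new Path(rootDir, "archive"), relative);
      }

      public static void main(String[] args) {
        Path root = new Path(
            "hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b");
        Path hfile = new Path(root,
            "data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/5a049c43d6844959b07f2ce3337b4135");
        // Prints the same archive location the StoreCloser entries below report.
        System.out.println(toArchivePath(root, hfile));
      }
    }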
2024-11-10T06:33:31,820 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/5a049c43d6844959b07f2ce3337b4135 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/5a049c43d6844959b07f2ce3337b4135 2024-11-10T06:33:31,821 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/3075e107deb84ef0b76e4cbabc2c5c1f to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/3075e107deb84ef0b76e4cbabc2c5c1f 2024-11-10T06:33:31,822 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/438b823ef1944f43b2ae4863eae7a773 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/438b823ef1944f43b2ae4863eae7a773 2024-11-10T06:33:31,823 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/2ff58c985f30430693c13f4cc5a15311 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/2ff58c985f30430693c13f4cc5a15311 2024-11-10T06:33:31,824 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/f262870a3ca045bba962b558d5002d0a to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/f262870a3ca045bba962b558d5002d0a 2024-11-10T06:33:31,825 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/c9b666c6b632464fbb6dbb76423a0657 to 
hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/c9b666c6b632464fbb6dbb76423a0657 2024-11-10T06:33:31,831 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-10T06:33:31,832 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 2024-11-10T06:33:31,832 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 25b555b60cc2797a1050b91aa2df6546: Waiting for close lock at 1731220411569Running coprocessor pre-close hooks at 1731220411569Disabling compacts and flushes for region at 1731220411569Disabling writes for close at 1731220411798 (+229 ms)Obtaining lock to block concurrent updates at 1731220411798Preparing flush snapshotting stores in 25b555b60cc2797a1050b91aa2df6546 at 1731220411798Finished memstore snapshotting TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546., syncing WAL and waiting on mvcc, flushsize=dataSize=2152, getHeapSize=2544, getOffHeapSize=0, getCellsCount=2 at 1731220411798Flushing stores of TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. at 1731220411799 (+1 ms)Flushing 25b555b60cc2797a1050b91aa2df6546/info: creating writer at 1731220411799Flushing 25b555b60cc2797a1050b91aa2df6546/info: appending metadata at 1731220411801 (+2 ms)Flushing 25b555b60cc2797a1050b91aa2df6546/info: closing flushed file at 1731220411801Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51da7257: reopening flushed file at 1731220411811 (+10 ms)Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 25b555b60cc2797a1050b91aa2df6546 in 19ms, sequenceid=123, compaction requested=true at 1731220411817 (+6 ms)Writing region close event to WAL at 1731220411828 (+11 ms)Running coprocessor post-close hooks at 1731220411832 (+4 ms)Closed at 1731220411832 2024-11-10T06:33:31,834 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,835 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=25b555b60cc2797a1050b91aa2df6546, regionState=CLOSED 2024-11-10T06:33:31,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 25b555b60cc2797a1050b91aa2df6546, server=4999977c7e1b,36039,1731220386197 because future has completed 2024-11-10T06:33:31,840 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-10T06:33:31,840 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 25b555b60cc2797a1050b91aa2df6546, server=4999977c7e1b,36039,1731220386197 in 427 msec 2024-11-10T06:33:31,842 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-10T06:33:31,842 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=25b555b60cc2797a1050b91aa2df6546, UNASSIGN in 436 msec 2024-11-10T06:33:31,850 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:31,855 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=25b555b60cc2797a1050b91aa2df6546, threads=4 2024-11-10T06:33:31,857 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/0467660b560a4eff9b4abcc27d74c6ad for region: 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,857 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/df2d699882c74a909e13c778dc4d5c03 for region: 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,857 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/14f99e4d34bd487baa354975eaf6780e for region: 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,857 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/c6dccb019abe4d5eaf1c036fd7599b3f for region: 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,868 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/14f99e4d34bd487baa354975eaf6780e, top=true 2024-11-10T06:33:31,868 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/df2d699882c74a909e13c778dc4d5c03, top=true 2024-11-10T06:33:31,870 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/c6dccb019abe4d5eaf1c036fd7599b3f, top=true 2024-11-10T06:33:31,883 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created 
linkFile:hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-c6dccb019abe4d5eaf1c036fd7599b3f for child: 130ef2252ee598b19b70e01950118900, parent: 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,883 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-df2d699882c74a909e13c778dc4d5c03 for child: 130ef2252ee598b19b70e01950118900, parent: 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,883 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/df2d699882c74a909e13c778dc4d5c03 for region: 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,883 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/c6dccb019abe4d5eaf1c036fd7599b3f for region: 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,883 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-14f99e4d34bd487baa354975eaf6780e for child: 130ef2252ee598b19b70e01950118900, parent: 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,883 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/14f99e4d34bd487baa354975eaf6780e for region: 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741847_1023 (size=27) 2024-11-10T06:33:31,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741847_1023 (size=27) 2024-11-10T06:33:31,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741848_1024 (size=27) 2024-11-10T06:33:31,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741848_1024 (size=27) 2024-11-10T06:33:31,894 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/0467660b560a4eff9b4abcc27d74c6ad for region: 25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:33:31,896 DEBUG [PEWorker-4 
{}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 25b555b60cc2797a1050b91aa2df6546 Daughter A: [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/d2063cd689984f6265f1135a900cf2f0/info/0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546] storefiles, Daughter B: [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-14f99e4d34bd487baa354975eaf6780e, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-c6dccb019abe4d5eaf1c036fd7599b3f, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-df2d699882c74a909e13c778dc4d5c03] storefiles. 2024-11-10T06:33:31,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741849_1025 (size=71) 2024-11-10T06:33:31,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741849_1025 (size=71) 2024-11-10T06:33:31,905 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:31,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741850_1026 (size=71) 2024-11-10T06:33:31,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741850_1026 (size=71) 2024-11-10T06:33:31,919 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:31,929 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/d2063cd689984f6265f1135a900cf2f0/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-10T06:33:31,931 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-10T06:33:31,933 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731220411933"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731220411933"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731220411933"}]},"ts":"1731220411933"} 2024-11-10T06:33:31,934 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731220411933"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731220411933"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731220411933"}]},"ts":"1731220411933"} 2024-11-10T06:33:31,934 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731220411933"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731220411933"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731220411933"}]},"ts":"1731220411933"} 2024-11-10T06:33:31,951 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d2063cd689984f6265f1135a900cf2f0, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=130ef2252ee598b19b70e01950118900, ASSIGN}] 2024-11-10T06:33:31,952 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d2063cd689984f6265f1135a900cf2f0, ASSIGN 2024-11-10T06:33:31,952 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=130ef2252ee598b19b70e01950118900, ASSIGN 2024-11-10T06:33:31,953 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d2063cd689984f6265f1135a900cf2f0, ASSIGN; state=SPLITTING_NEW, location=4999977c7e1b,36039,1731220386197; forceNewPlan=false, retain=false 2024-11-10T06:33:31,953 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=130ef2252ee598b19b70e01950118900, ASSIGN; state=SPLITTING_NEW, location=4999977c7e1b,36039,1731220386197; forceNewPlan=false, retain=false 2024-11-10T06:33:32,104 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=130ef2252ee598b19b70e01950118900, regionState=OPENING, regionLocation=4999977c7e1b,36039,1731220386197 2024-11-10T06:33:32,104 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta 
row=d2063cd689984f6265f1135a900cf2f0, regionState=OPENING, regionLocation=4999977c7e1b,36039,1731220386197 2024-11-10T06:33:32,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d2063cd689984f6265f1135a900cf2f0, ASSIGN because future has completed 2024-11-10T06:33:32,107 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure d2063cd689984f6265f1135a900cf2f0, server=4999977c7e1b,36039,1731220386197}] 2024-11-10T06:33:32,107 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=130ef2252ee598b19b70e01950118900, ASSIGN because future has completed 2024-11-10T06:33:32,108 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 130ef2252ee598b19b70e01950118900, server=4999977c7e1b,36039,1731220386197}] 2024-11-10T06:33:32,261 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0. 2024-11-10T06:33:32,261 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => d2063cd689984f6265f1135a900cf2f0, NAME => 'TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-10T06:33:32,262 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling d2063cd689984f6265f1135a900cf2f0 2024-11-10T06:33:32,262 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:33:32,262 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for d2063cd689984f6265f1135a900cf2f0 2024-11-10T06:33:32,262 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for d2063cd689984f6265f1135a900cf2f0 2024-11-10T06:33:32,263 INFO [StoreOpener-d2063cd689984f6265f1135a900cf2f0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d2063cd689984f6265f1135a900cf2f0 2024-11-10T06:33:32,264 INFO [StoreOpener-d2063cd689984f6265f1135a900cf2f0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2063cd689984f6265f1135a900cf2f0 columnFamilyName info 2024-11-10T06:33:32,264 DEBUG [StoreOpener-d2063cd689984f6265f1135a900cf2f0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:32,275 DEBUG [StoreOpener-d2063cd689984f6265f1135a900cf2f0-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/d2063cd689984f6265f1135a900cf2f0/info/0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546->hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/0467660b560a4eff9b4abcc27d74c6ad-bottom 2024-11-10T06:33:32,275 INFO [StoreOpener-d2063cd689984f6265f1135a900cf2f0-1 {}] regionserver.HStore(327): Store=d2063cd689984f6265f1135a900cf2f0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:33:32,275 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for d2063cd689984f6265f1135a900cf2f0 2024-11-10T06:33:32,276 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/d2063cd689984f6265f1135a900cf2f0 2024-11-10T06:33:32,277 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/d2063cd689984f6265f1135a900cf2f0 2024-11-10T06:33:32,278 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for d2063cd689984f6265f1135a900cf2f0 2024-11-10T06:33:32,278 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for d2063cd689984f6265f1135a900cf2f0 2024-11-10T06:33:32,279 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for d2063cd689984f6265f1135a900cf2f0 2024-11-10T06:33:32,280 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened d2063cd689984f6265f1135a900cf2f0; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=813524, jitterRate=0.034449294209480286}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 
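Note that the daughter regions above do not copy the parent's data at split time: daughter A loads a half-file reference such as 0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546 (the "-bottom" half), while files wholly above the split point are exposed to daughter B through HFileLink names such as TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-c6dccb019abe4d5eaf1c036fd7599b3f. The two naming patterns are sketched below for illustration only; HBase's own Reference and HFileLink classes handle this in production.

    public class SplitFileNameSketch {
      // Patterns visible in the split log:
      //   reference:  <parentHFile>.<parentRegionEncodedName>        (top/bottom half of a parent file)
      //   hfile link: <tableName>=<parentRegionEncodedName>-<hfile>  (link into the parent's store dir)
      public static void main(String[] args) {
        String reference = "0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546";
        int dot = reference.lastIndexOf('.');
        System.out.println("parent hfile = " + reference.substring(0, dot)
            + ", parent region = " + reference.substring(dot + 1));

        String link =
            "TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-c6dccb019abe4d5eaf1c036fd7599b3f";
        int eq = link.indexOf('=');
        int dash = link.lastIndexOf('-');
        System.out.println("table = " + link.substring(0, eq)
            + ", parent region = " + link.substring(eq + 1, dash)
            + ", hfile = " + link.substring(dash + 1));
      }
    }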
2024-11-10T06:33:32,280 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d2063cd689984f6265f1135a900cf2f0 2024-11-10T06:33:32,281 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for d2063cd689984f6265f1135a900cf2f0: Running coprocessor pre-open hook at 1731220412262Writing region info on filesystem at 1731220412262Initializing all the Stores at 1731220412263 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220412263Cleaning up temporary data from old regions at 1731220412278 (+15 ms)Running coprocessor post-open hooks at 1731220412280 (+2 ms)Region opened successfully at 1731220412281 (+1 ms) 2024-11-10T06:33:32,282 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0., pid=12, masterSystemTime=1731220412258 2024-11-10T06:33:32,282 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store d2063cd689984f6265f1135a900cf2f0:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T06:33:32,282 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:32,282 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-10T06:33:32,282 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0. 2024-11-10T06:33:32,282 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1541): d2063cd689984f6265f1135a900cf2f0/info is initiating minor compaction (all files) 2024-11-10T06:33:32,282 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d2063cd689984f6265f1135a900cf2f0/info in TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0. 
2024-11-10T06:33:32,283 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/d2063cd689984f6265f1135a900cf2f0/info/0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546->hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/0467660b560a4eff9b4abcc27d74c6ad-bottom] into tmpdir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/d2063cd689984f6265f1135a900cf2f0/.tmp, totalSize=73.6 K 2024-11-10T06:33:32,283 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731220397122 2024-11-10T06:33:32,284 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0. 2024-11-10T06:33:32,284 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0. 2024-11-10T06:33:32,284 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900. 
2024-11-10T06:33:32,285 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 130ef2252ee598b19b70e01950118900, NAME => 'TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-10T06:33:32,285 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:32,285 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:33:32,285 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:32,285 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=d2063cd689984f6265f1135a900cf2f0, regionState=OPEN, openSeqNum=127, regionLocation=4999977c7e1b,36039,1731220386197 2024-11-10T06:33:32,285 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:32,286 INFO [StoreOpener-130ef2252ee598b19b70e01950118900-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:32,287 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-10T06:33:32,287 INFO [StoreOpener-130ef2252ee598b19b70e01950118900-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 130ef2252ee598b19b70e01950118900 columnFamilyName info 2024-11-10T06:33:32,287 DEBUG [StoreOpener-130ef2252ee598b19b70e01950118900-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:33:32,287 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
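The split and the follow-up compactions recorded here were driven by the test itself. Purely as a generic illustration (not the test's code), a client could request an equivalent split at row0062 and then compact the daughters through the standard Admin API, which hands the request to the master's SplitTableRegionProcedure as seen at pid=7 above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to split the region holding 'row0062' at that row.
          admin.split(table, Bytes.toBytes("row0062"));
          // Compaction requests are asynchronous; the daughters rewrite their
          // reference/link files into regular store files, as the
          // shortCompactions/longCompactions threads do in the entries nearby.
          admin.majorCompact(table);
        }
      }
    }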
2024-11-10T06:33:32,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-10T06:33:32,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure d2063cd689984f6265f1135a900cf2f0, server=4999977c7e1b,36039,1731220386197 because future has completed 2024-11-10T06:33:32,291 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-10T06:33:32,291 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure d2063cd689984f6265f1135a900cf2f0, server=4999977c7e1b,36039,1731220386197 in 182 msec 2024-11-10T06:33:32,293 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d2063cd689984f6265f1135a900cf2f0, ASSIGN in 340 msec 2024-11-10T06:33:32,295 DEBUG [StoreOpener-130ef2252ee598b19b70e01950118900-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546->hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/0467660b560a4eff9b4abcc27d74c6ad-top 2024-11-10T06:33:32,304 DEBUG [StoreOpener-130ef2252ee598b19b70e01950118900-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-14f99e4d34bd487baa354975eaf6780e 2024-11-10T06:33:32,306 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2063cd689984f6265f1135a900cf2f0#info#compaction#64 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T06:33:32,306 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/d2063cd689984f6265f1135a900cf2f0/.tmp/info/0707d6f9fffc4bb8bd158dc7ebeb6929 is 1080, key is row0001/info:/1731220397122/Put/seqid=0 2024-11-10T06:33:32,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/.tmp/info/c2ffdcd709cb4f068631e79184e9d67e is 193, key is TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900./info:regioninfo/1731220412104/Put/seqid=0 2024-11-10T06:33:32,309 DEBUG [StoreOpener-130ef2252ee598b19b70e01950118900-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-c6dccb019abe4d5eaf1c036fd7599b3f 2024-11-10T06:33:32,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741851_1027 (size=70862) 2024-11-10T06:33:32,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741851_1027 (size=70862) 2024-11-10T06:33:32,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741852_1028 (size=9847) 2024-11-10T06:33:32,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741852_1028 (size=9847) 2024-11-10T06:33:32,314 DEBUG [StoreOpener-130ef2252ee598b19b70e01950118900-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-df2d699882c74a909e13c778dc4d5c03 2024-11-10T06:33:32,314 INFO [StoreOpener-130ef2252ee598b19b70e01950118900-1 {}] regionserver.HStore(327): Store=130ef2252ee598b19b70e01950118900/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:33:32,314 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:32,314 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/.tmp/info/c2ffdcd709cb4f068631e79184e9d67e 2024-11-10T06:33:32,315 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900 2024-11-10T06:33:32,316 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 
{event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900 2024-11-10T06:33:32,317 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:32,317 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:32,318 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/d2063cd689984f6265f1135a900cf2f0/.tmp/info/0707d6f9fffc4bb8bd158dc7ebeb6929 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/d2063cd689984f6265f1135a900cf2f0/info/0707d6f9fffc4bb8bd158dc7ebeb6929 2024-11-10T06:33:32,319 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:32,320 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 130ef2252ee598b19b70e01950118900; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=758441, jitterRate=-0.0355924516916275}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T06:33:32,320 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:32,320 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 130ef2252ee598b19b70e01950118900: Running coprocessor pre-open hook at 1731220412285Writing region info on filesystem at 1731220412285Initializing all the Stores at 1731220412286 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220412286Cleaning up temporary data from old regions at 1731220412317 (+31 ms)Running coprocessor post-open hooks at 1731220412320 (+3 ms)Region opened successfully at 1731220412320 2024-11-10T06:33:32,321 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900., pid=13, masterSystemTime=1731220412258 2024-11-10T06:33:32,321 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 130ef2252ee598b19b70e01950118900:info, priority=-2147483648, current under compaction store size is 2 2024-11-10T06:33:32,321 DEBUG 
[RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:32,321 DEBUG [RS:0;4999977c7e1b:36039-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-10T06:33:32,324 DEBUG [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900. 2024-11-10T06:33:32,324 INFO [RS_OPEN_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900. 2024-11-10T06:33:32,324 INFO [RS:0;4999977c7e1b:36039-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900. 2024-11-10T06:33:32,324 DEBUG [RS:0;4999977c7e1b:36039-longCompactions-0 {}] regionserver.HStore(1541): 130ef2252ee598b19b70e01950118900/info is initiating minor compaction (all files) 2024-11-10T06:33:32,324 INFO [RS:0;4999977c7e1b:36039-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 130ef2252ee598b19b70e01950118900/info in TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900. 2024-11-10T06:33:32,324 INFO [RS:0;4999977c7e1b:36039-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546->hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/0467660b560a4eff9b4abcc27d74c6ad-top, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-df2d699882c74a909e13c778dc4d5c03, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-c6dccb019abe4d5eaf1c036fd7599b3f, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-14f99e4d34bd487baa354975eaf6780e] into tmpdir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp, totalSize=120.8 K 2024-11-10T06:33:32,325 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=130ef2252ee598b19b70e01950118900, regionState=OPEN, openSeqNum=127, regionLocation=4999977c7e1b,36039,1731220386197 2024-11-10T06:33:32,325 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] 
regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in d2063cd689984f6265f1135a900cf2f0/info of d2063cd689984f6265f1135a900cf2f0 into 0707d6f9fffc4bb8bd158dc7ebeb6929(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T06:33:32,325 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d2063cd689984f6265f1135a900cf2f0: 2024-11-10T06:33:32,325 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0., storeName=d2063cd689984f6265f1135a900cf2f0/info, priority=15, startTime=1731220412282; duration=0sec 2024-11-10T06:33:32,325 DEBUG [RS:0;4999977c7e1b:36039-longCompactions-0 {}] compactions.Compactor(225): Compacting 0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731220397122 2024-11-10T06:33:32,325 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:32,325 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2063cd689984f6265f1135a900cf2f0:info 2024-11-10T06:33:32,325 DEBUG [RS:0;4999977c7e1b:36039-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-df2d699882c74a909e13c778dc4d5c03, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1731220411322 2024-11-10T06:33:32,326 DEBUG [RS:0;4999977c7e1b:36039-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-c6dccb019abe4d5eaf1c036fd7599b3f, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1731220411347 2024-11-10T06:33:32,326 DEBUG [RS:0;4999977c7e1b:36039-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-14f99e4d34bd487baa354975eaf6780e, keycount=2, bloomtype=ROW, size=6.9 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731220411375 2024-11-10T06:33:32,327 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 130ef2252ee598b19b70e01950118900, server=4999977c7e1b,36039,1731220386197 because future has completed 2024-11-10T06:33:32,331 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-10T06:33:32,331 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 130ef2252ee598b19b70e01950118900, server=4999977c7e1b,36039,1731220386197 in 220 msec 2024-11-10T06:33:32,333 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-10T06:33:32,333 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=130ef2252ee598b19b70e01950118900, 
ASSIGN in 380 msec 2024-11-10T06:33:32,335 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=25b555b60cc2797a1050b91aa2df6546, daughterA=d2063cd689984f6265f1135a900cf2f0, daughterB=130ef2252ee598b19b70e01950118900 in 941 msec 2024-11-10T06:33:32,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/.tmp/ns/6588a8fab4a648178394c508e7e20311 is 43, key is default/ns:d/1731220386988/Put/seqid=0 2024-11-10T06:33:32,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741853_1029 (size=5153) 2024-11-10T06:33:32,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741853_1029 (size=5153) 2024-11-10T06:33:32,346 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/.tmp/ns/6588a8fab4a648178394c508e7e20311 2024-11-10T06:33:32,358 INFO [RS:0;4999977c7e1b:36039-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 130ef2252ee598b19b70e01950118900#info#compaction#67 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T06:33:32,358 DEBUG [RS:0;4999977c7e1b:36039-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/1cc71627f303476ebe854f7fcdb818d5 is 1080, key is row0062/info:/1731220409313/Put/seqid=0 2024-11-10T06:33:32,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741854_1030 (size=43081) 2024-11-10T06:33:32,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741854_1030 (size=43081) 2024-11-10T06:33:32,367 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/.tmp/table/f20a5fa0b4fa4d5e9c7ec8a0e1a3a085 is 65, key is TestLogRolling-testLogRolling/table:state/1731220387384/Put/seqid=0 2024-11-10T06:33:32,370 DEBUG [RS:0;4999977c7e1b:36039-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/1cc71627f303476ebe854f7fcdb818d5 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/1cc71627f303476ebe854f7fcdb818d5 2024-11-10T06:33:32,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741855_1031 (size=5340) 2024-11-10T06:33:32,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34635 is added to blk_1073741855_1031 (size=5340) 2024-11-10T06:33:32,373 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/.tmp/table/f20a5fa0b4fa4d5e9c7ec8a0e1a3a085 2024-11-10T06:33:32,377 INFO [RS:0;4999977c7e1b:36039-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 130ef2252ee598b19b70e01950118900/info of 130ef2252ee598b19b70e01950118900 into 1cc71627f303476ebe854f7fcdb818d5(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T06:33:32,377 DEBUG [RS:0;4999977c7e1b:36039-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 130ef2252ee598b19b70e01950118900: 2024-11-10T06:33:32,377 INFO [RS:0;4999977c7e1b:36039-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900., storeName=130ef2252ee598b19b70e01950118900/info, priority=12, startTime=1731220412321; duration=0sec 2024-11-10T06:33:32,377 DEBUG [RS:0;4999977c7e1b:36039-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:32,377 DEBUG [RS:0;4999977c7e1b:36039-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 130ef2252ee598b19b70e01950118900:info 2024-11-10T06:33:32,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/.tmp/info/c2ffdcd709cb4f068631e79184e9d67e as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/info/c2ffdcd709cb4f068631e79184e9d67e 2024-11-10T06:33:32,383 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/info/c2ffdcd709cb4f068631e79184e9d67e, entries=30, sequenceid=17, filesize=9.6 K 2024-11-10T06:33:32,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/.tmp/ns/6588a8fab4a648178394c508e7e20311 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/ns/6588a8fab4a648178394c508e7e20311 2024-11-10T06:33:32,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/ns/6588a8fab4a648178394c508e7e20311, entries=2, sequenceid=17, filesize=5.0 K 2024-11-10T06:33:32,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/.tmp/table/f20a5fa0b4fa4d5e9c7ec8a0e1a3a085 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/table/f20a5fa0b4fa4d5e9c7ec8a0e1a3a085 2024-11-10T06:33:32,393 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/table/f20a5fa0b4fa4d5e9c7ec8a0e1a3a085, entries=2, sequenceid=17, filesize=5.2 K 2024-11-10T06:33:32,394 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 107ms, sequenceid=17, compaction requested=false 2024-11-10T06:33:32,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-10T06:33:32,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:32,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:33,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34170 deadline: 1731220423379, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. is not online on 4999977c7e1b,36039,1731220386197 2024-11-10T06:33:33,380 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546., hostname=4999977c7e1b,36039,1731220386197, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546., hostname=4999977c7e1b,36039,1731220386197, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. 
is not online on 4999977c7e1b,36039,1731220386197 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-10T06:33:33,380 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546., hostname=4999977c7e1b,36039,1731220386197, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546. is not online on 4999977c7e1b,36039,1731220386197 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-10T06:33:33,380 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731220387024.25b555b60cc2797a1050b91aa2df6546., hostname=4999977c7e1b,36039,1731220386197, seqNum=2 from cache 2024-11-10T06:33:33,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:33,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:33:34,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:34,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:35,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:33:35,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:36,132 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-10T06:33:36,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:36,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:36,832 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,832 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,854 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,854 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,854 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:36,856 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,363 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T06:33:37,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,388 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,389 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,389 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,391 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T06:33:37,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:33:37,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:38,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:38,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:33:39,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:39,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:40,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:33:40,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:41,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:41,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:33:42,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:42,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:43,411 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900., hostname=4999977c7e1b,36039,1731220386197, seqNum=127] 2024-11-10T06:33:43,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:43,421 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 130ef2252ee598b19b70e01950118900 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T06:33:43,427 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/bc6dc6048f2c4510b0a7b80fce144118 is 1080, key is row0097/info:/1731220423412/Put/seqid=0 2024-11-10T06:33:43,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741856_1032 (size=12516) 2024-11-10T06:33:43,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741856_1032 (size=12516) 2024-11-10T06:33:43,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/bc6dc6048f2c4510b0a7b80fce144118 2024-11-10T06:33:43,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/bc6dc6048f2c4510b0a7b80fce144118 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/bc6dc6048f2c4510b0a7b80fce144118 2024-11-10T06:33:43,443 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/bc6dc6048f2c4510b0a7b80fce144118, entries=7, sequenceid=137, filesize=12.2 K 2024-11-10T06:33:43,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 130ef2252ee598b19b70e01950118900 in 23ms, sequenceid=137, compaction requested=false 2024-11-10T06:33:43,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 130ef2252ee598b19b70e01950118900: 2024-11-10T06:33:43,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:43,445 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 130ef2252ee598b19b70e01950118900 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-10T06:33:43,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/3a4c8d9ed6d044a9af102386778f11f6 is 1080, key is row0104/info:/1731220423422/Put/seqid=0 2024-11-10T06:33:43,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741857_1033 (size=22238) 2024-11-10T06:33:43,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741857_1033 (size=22238) 2024-11-10T06:33:43,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/3a4c8d9ed6d044a9af102386778f11f6 2024-11-10T06:33:43,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:43,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:33:43,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/3a4c8d9ed6d044a9af102386778f11f6 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/3a4c8d9ed6d044a9af102386778f11f6 2024-11-10T06:33:43,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/3a4c8d9ed6d044a9af102386778f11f6, entries=16, sequenceid=156, filesize=21.7 K 2024-11-10T06:33:43,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=9.46 KB/9684 for 130ef2252ee598b19b70e01950118900 in 20ms, sequenceid=156, compaction requested=true 2024-11-10T06:33:43,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 130ef2252ee598b19b70e01950118900: 2024-11-10T06:33:43,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 130ef2252ee598b19b70e01950118900:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T06:33:43,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:43,465 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T06:33:43,466 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 77835 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T06:33:43,466 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1541): 130ef2252ee598b19b70e01950118900/info is initiating minor compaction (all files) 2024-11-10T06:33:43,466 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 130ef2252ee598b19b70e01950118900/info in TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900. 
2024-11-10T06:33:43,466 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/1cc71627f303476ebe854f7fcdb818d5, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/bc6dc6048f2c4510b0a7b80fce144118, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/3a4c8d9ed6d044a9af102386778f11f6] into tmpdir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp, totalSize=76.0 K 2024-11-10T06:33:43,466 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1cc71627f303476ebe854f7fcdb818d5, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731220409313 2024-11-10T06:33:43,467 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting bc6dc6048f2c4510b0a7b80fce144118, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731220423412 2024-11-10T06:33:43,467 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3a4c8d9ed6d044a9af102386778f11f6, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731220423422 2024-11-10T06:33:43,477 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 130ef2252ee598b19b70e01950118900#info#compaction#71 average throughput is 59.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T06:33:43,477 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/0fd2deac52274b4f9acd2608b15489ec is 1080, key is row0062/info:/1731220409313/Put/seqid=0 2024-11-10T06:33:43,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741858_1034 (size=68045) 2024-11-10T06:33:43,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741858_1034 (size=68045) 2024-11-10T06:33:43,487 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/0fd2deac52274b4f9acd2608b15489ec as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0fd2deac52274b4f9acd2608b15489ec 2024-11-10T06:33:43,492 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 130ef2252ee598b19b70e01950118900/info of 130ef2252ee598b19b70e01950118900 into 0fd2deac52274b4f9acd2608b15489ec(size=66.5 K), total size for store is 66.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T06:33:43,492 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 130ef2252ee598b19b70e01950118900: 2024-11-10T06:33:43,492 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900., storeName=130ef2252ee598b19b70e01950118900/info, priority=13, startTime=1731220423465; duration=0sec 2024-11-10T06:33:43,492 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:43,492 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 130ef2252ee598b19b70e01950118900:info 2024-11-10T06:33:44,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:44,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:45,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:45,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:45,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:45,462 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 130ef2252ee598b19b70e01950118900 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-10T06:33:45,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/0ef7b7165c0d4922aa13f2fe20caa6a8 is 1080, key is row0120/info:/1731220423446/Put/seqid=0 2024-11-10T06:33:45,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741859_1035 (size=15750) 2024-11-10T06:33:45,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741859_1035 (size=15750) 2024-11-10T06:33:45,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/0ef7b7165c0d4922aa13f2fe20caa6a8 2024-11-10T06:33:45,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/0ef7b7165c0d4922aa13f2fe20caa6a8 as 
hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0ef7b7165c0d4922aa13f2fe20caa6a8 2024-11-10T06:33:45,484 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0ef7b7165c0d4922aa13f2fe20caa6a8, entries=10, sequenceid=170, filesize=15.4 K 2024-11-10T06:33:45,485 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=14.71 KB/15064 for 130ef2252ee598b19b70e01950118900 in 23ms, sequenceid=170, compaction requested=false 2024-11-10T06:33:45,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 130ef2252ee598b19b70e01950118900: 2024-11-10T06:33:45,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:45,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 130ef2252ee598b19b70e01950118900 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-10T06:33:45,491 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/da32821a2a47425183f70b7686f8b0e1 is 1080, key is row0130/info:/1731220425463/Put/seqid=0 2024-11-10T06:33:45,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741860_1036 (size=22238) 2024-11-10T06:33:45,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741860_1036 (size=22238) 2024-11-10T06:33:45,523 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/da32821a2a47425183f70b7686f8b0e1 2024-11-10T06:33:45,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/da32821a2a47425183f70b7686f8b0e1 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/da32821a2a47425183f70b7686f8b0e1 2024-11-10T06:33:45,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=130ef2252ee598b19b70e01950118900, server=4999977c7e1b,36039,1731220386197 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-10T06:33:45,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34170 deadline: 1731220435534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=130ef2252ee598b19b70e01950118900, server=4999977c7e1b,36039,1731220386197 2024-11-10T06:33:45,535 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900., hostname=4999977c7e1b,36039,1731220386197, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900., hostname=4999977c7e1b,36039,1731220386197, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=130ef2252ee598b19b70e01950118900, server=4999977c7e1b,36039,1731220386197 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-10T06:33:45,536 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900., hostname=4999977c7e1b,36039,1731220386197, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=130ef2252ee598b19b70e01950118900, server=4999977c7e1b,36039,1731220386197 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at 
org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-10T06:33:45,536 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900., hostname=4999977c7e1b,36039,1731220386197, seqNum=127 because the exception is null or not the one we care about 2024-11-10T06:33:45,536 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/da32821a2a47425183f70b7686f8b0e1, entries=16, sequenceid=189, filesize=21.7 K 2024-11-10T06:33:45,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for 130ef2252ee598b19b70e01950118900 in 50ms, sequenceid=189, compaction requested=true 2024-11-10T06:33:45,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 130ef2252ee598b19b70e01950118900: 2024-11-10T06:33:45,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 130ef2252ee598b19b70e01950118900:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T06:33:45,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:45,538 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T06:33:45,539 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 106033 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T06:33:45,539 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1541): 130ef2252ee598b19b70e01950118900/info is initiating minor compaction (all files) 2024-11-10T06:33:45,539 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 130ef2252ee598b19b70e01950118900/info in TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900. 
2024-11-10T06:33:45,539 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0fd2deac52274b4f9acd2608b15489ec, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0ef7b7165c0d4922aa13f2fe20caa6a8, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/da32821a2a47425183f70b7686f8b0e1] into tmpdir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp, totalSize=103.5 K 2024-11-10T06:33:45,539 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0fd2deac52274b4f9acd2608b15489ec, keycount=58, bloomtype=ROW, size=66.5 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731220409313 2024-11-10T06:33:45,540 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0ef7b7165c0d4922aa13f2fe20caa6a8, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731220423446 2024-11-10T06:33:45,540 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting da32821a2a47425183f70b7686f8b0e1, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1731220425463 2024-11-10T06:33:45,552 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 130ef2252ee598b19b70e01950118900#info#compaction#74 average throughput is 43.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T06:33:45,553 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/9e54a5fb755440c898a94311d591caa3 is 1080, key is row0062/info:/1731220409313/Put/seqid=0 2024-11-10T06:33:45,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741861_1037 (size=96252) 2024-11-10T06:33:45,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741861_1037 (size=96252) 2024-11-10T06:33:45,564 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/9e54a5fb755440c898a94311d591caa3 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/9e54a5fb755440c898a94311d591caa3 2024-11-10T06:33:45,570 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 130ef2252ee598b19b70e01950118900/info of 130ef2252ee598b19b70e01950118900 into 9e54a5fb755440c898a94311d591caa3(size=94.0 K), total size for store is 94.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T06:33:45,570 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 130ef2252ee598b19b70e01950118900: 2024-11-10T06:33:45,570 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900., storeName=130ef2252ee598b19b70e01950118900/info, priority=13, startTime=1731220425537; duration=0sec 2024-11-10T06:33:45,570 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:45,570 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 130ef2252ee598b19b70e01950118900:info 2024-11-10T06:33:46,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-11-10T06:33:46,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:33:46,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:33:47,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
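The WARN entries above are emitted while Close-WAL-Writer-0 tries to recover the lease on two WAL files after the backing mini-cluster filesystem has already been shut down: RecoverLeaseFSUtils probes DFSClient.isFileClosed through reflection, the probe throws "Filesystem closed", and reflection reports that failure wrapped in an InvocationTargetException. As a minimal, self-contained illustration (the Client class here is a hypothetical stand-in, not the HDFS client), the sketch below shows why a reflective call surfaces the target's exception this way and how the real cause is recovered with getCause().

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveIsFileClosed {
    /** Hypothetical stand-in for a DFS client whose backing filesystem is already closed. */
    static class Client {
        public boolean isFileClosed(String path) throws IOException {
            // The filesystem has been shut down, as during the test teardown above.
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        Client client = new Client();
        Method m = Client.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(client, "/WALs/example");   // reflective call, like RecoverLeaseFSUtils
        } catch (InvocationTargetException e) {
            // Reflection wraps whatever the target threw; the interesting part is the cause.
            System.out.println("wrapped: " + e);
            System.out.println("cause:   " + e.getCause());
        }
    }
}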
2024-11-10T06:33:47,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:33:47,485 INFO [master/4999977c7e1b:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-10T06:33:47,486 INFO [master/4999977c7e1b:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-10T06:33:48,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:48,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:49,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:49,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:50,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:50,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:51,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:33:51,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:33:51,959 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340
2024-11-10T06:33:52,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:52,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:53,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:53,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:54,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:54,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:55,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:55,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:33:55,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 130ef2252ee598b19b70e01950118900
2024-11-10T06:33:55,627 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 130ef2252ee598b19b70e01950118900 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-10T06:33:55,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/04ac8c9434e640a593169b945ff4278a is 1080, key is row0146/info:/1731220425488/Put/seqid=0
2024-11-10T06:33:55,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741862_1038 (size=20078)
2024-11-10T06:33:55,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741862_1038 (size=20078)
2024-11-10T06:33:55,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/04ac8c9434e640a593169b945ff4278a
2024-11-10T06:33:55,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/04ac8c9434e640a593169b945ff4278a as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/04ac8c9434e640a593169b945ff4278a
2024-11-10T06:33:55,647 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/04ac8c9434e640a593169b945ff4278a, entries=14, sequenceid=207, filesize=19.6 K
2024-11-10T06:33:55,648 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=1.05 KB/1076 for 130ef2252ee598b19b70e01950118900 in 21ms, sequenceid=207, compaction requested=false
2024-11-10T06:33:55,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 130ef2252ee598b19b70e01950118900:
2024-11-10T06:33:56,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:33:56,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
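The flush at 06:33:55,627 through 06:33:55,649 above follows a two-step pattern: the memstore is written out as a new file under the region's .tmp directory, and only then is it committed into the live info directory, so readers never see a half-written store file (the "Committing ... as ..." line is that commit step on HDFS). As a rough local-filesystem analogy rather than HBase or HDFS code (directory and file names are made up), a write-then-atomic-move commit looks like this:

import java.io.IOException;
import java.nio.file.*;
import java.util.List;

public class WriteThenCommit {
    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createDirectories(Path.of("store", "info"));
        Path tmpDir = Files.createDirectories(Path.of("store", ".tmp"));

        // 1. Write the complete file under .tmp first.
        Path tmpFile = tmpDir.resolve("example-flush-output");
        Files.write(tmpFile, List.of("row0146\tvalue", "row0147\tvalue"));

        // 2. Commit by moving it into the live store directory in a single step.
        Path committed = storeDir.resolve(tmpFile.getFileName());
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

        System.out.println("committed " + committed);
    }
}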
2024-11-10T06:33:57,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T06:33:57,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
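Note the cadence of these warnings: the same two WAL files fail lease recovery roughly once per second from 06:33:46 through 06:33:57, because the close-writer task retries after each failed isFileClosed probe. The sketch below shows the general shape of such a bounded retry-with-delay loop; the attemptRecovery method, the attempt limit, and the one-second pause are illustrative assumptions, not the actual RecoverLeaseFSUtils logic.

import java.io.IOException;
import java.util.concurrent.TimeUnit;

public class LeaseRecoveryRetry {
    /** Hypothetical probe; here it always fails, like isFileClosed on a closed DFSClient. */
    static boolean attemptRecovery(String walPath) throws IOException {
        throw new IOException("Filesystem closed");
    }

    public static void main(String[] args) throws InterruptedException {
        String wal = "/WALs/example-regionserver/example-wal";
        int maxAttempts = 5;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                if (attemptRecovery(wal)) {
                    System.out.println("lease recovered on attempt " + attempt);
                    return;
                }
            } catch (IOException e) {
                // Mirrors the WARN lines above: report the failure, then retry after a pause.
                System.out.println("attempt " + attempt + " failed: " + e.getMessage());
            }
            TimeUnit.SECONDS.sleep(1);
        }
        System.out.println("giving up after " + maxAttempts + " attempts");
    }
}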
11 more
2024-11-10T06:33:57,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 130ef2252ee598b19b70e01950118900
2024-11-10T06:33:57,639 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 130ef2252ee598b19b70e01950118900 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-10T06:33:57,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/2835aa2261be45fe9b47d8a2a77107a0 is 1080, key is row0160/info:/1731220435629/Put/seqid=0
2024-11-10T06:33:57,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741863_1039 (size=12516)
2024-11-10T06:33:57,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741863_1039 (size=12516)
2024-11-10T06:33:57,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/2835aa2261be45fe9b47d8a2a77107a0
2024-11-10T06:33:57,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/2835aa2261be45fe9b47d8a2a77107a0 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/2835aa2261be45fe9b47d8a2a77107a0
2024-11-10T06:33:57,660 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/2835aa2261be45fe9b47d8a2a77107a0, entries=7, sequenceid=217, filesize=12.2 K
2024-11-10T06:33:57,661 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 130ef2252ee598b19b70e01950118900 in 22ms, sequenceid=217, compaction requested=true
2024-11-10T06:33:57,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 130ef2252ee598b19b70e01950118900:
2024-11-10T06:33:57,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 130ef2252ee598b19b70e01950118900:info, priority=-2147483648, current under compaction store size is 1
2024-11-10T06:33:57,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-10T06:33:57,661 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-10T06:33:57,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 130ef2252ee598b19b70e01950118900
2024-11-10T06:33:57,662 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 130ef2252ee598b19b70e01950118900 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-10T06:33:57,662 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128846 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-10T06:33:57,662 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1541): 130ef2252ee598b19b70e01950118900/info is initiating minor compaction (all files)
2024-11-10T06:33:57,662 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 130ef2252ee598b19b70e01950118900/info in TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.
2024-11-10T06:33:57,662 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/9e54a5fb755440c898a94311d591caa3, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/04ac8c9434e640a593169b945ff4278a, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/2835aa2261be45fe9b47d8a2a77107a0] into tmpdir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp, totalSize=125.8 K
2024-11-10T06:33:57,663 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9e54a5fb755440c898a94311d591caa3, keycount=84, bloomtype=ROW, size=94.0 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1731220409313
2024-11-10T06:33:57,663 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 04ac8c9434e640a593169b945ff4278a, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1731220425488
2024-11-10T06:33:57,664 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2835aa2261be45fe9b47d8a2a77107a0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1731220435629
2024-11-10T06:33:57,666 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/9eaa18e0b5cb4fd3856a14a12a840c81 is 1080, key is row0167/info:/1731220437640/Put/seqid=0
2024-11-10T06:33:57,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741864_1040 (size=20078)
2024-11-10T06:33:57,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741864_1040 (size=20078)
2024-11-10T06:33:57,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/9eaa18e0b5cb4fd3856a14a12a840c81
2024-11-10T06:33:57,677 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 130ef2252ee598b19b70e01950118900#info#compaction#78 average throughput is 53.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-10T06:33:57,678 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/5ccf0ee163e644929e538ec192fdd730 is 1080, key is row0062/info:/1731220409313/Put/seqid=0
2024-11-10T06:33:57,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/9eaa18e0b5cb4fd3856a14a12a840c81 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/9eaa18e0b5cb4fd3856a14a12a840c81
2024-11-10T06:33:57,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741865_1041 (size=118996)
2024-11-10T06:33:57,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741865_1041 (size=118996)
2024-11-10T06:33:57,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/9eaa18e0b5cb4fd3856a14a12a840c81, entries=14, sequenceid=234, filesize=19.6 K
2024-11-10T06:33:57,687 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/5ccf0ee163e644929e538ec192fdd730 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/5ccf0ee163e644929e538ec192fdd730
2024-11-10T06:33:57,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for 130ef2252ee598b19b70e01950118900 in 26ms, sequenceid=234, compaction requested=false
2024-11-10T06:33:57,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 130ef2252ee598b19b70e01950118900:
2024-11-10T06:33:57,693 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 130ef2252ee598b19b70e01950118900/info of 130ef2252ee598b19b70e01950118900 into 5ccf0ee163e644929e538ec192fdd730(size=116.2 K), total size for store is 135.8 K. This selection was in queue for 0sec, and took 0sec to execute.
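The "Committing ... /.tmp/info/... as ... /info/..." entries above show the two-step publish used for new store files: the flusher and the compactor first write the complete HFile under the region's .tmp directory and only then move it into the visible info directory. Below is a minimal, self-contained sketch of that write-then-rename pattern on a local filesystem; the directory layout and file name are borrowed from the log for readability, but this is an illustration of the pattern, not the HBase HRegionFileSystem code.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class TmpThenCommitSketch {
    public static void main(String[] args) throws IOException {
        Path region = Files.createTempDirectory("130ef2252ee598b19b70e01950118900");
        Path tmpDir = Files.createDirectories(region.resolve(".tmp/info"));
        Path infoDir = Files.createDirectories(region.resolve("info"));

        // Step 1: write the whole file under .tmp/, where readers never look.
        Path tmpFile = tmpDir.resolve("9eaa18e0b5cb4fd3856a14a12a840c81");
        Files.write(tmpFile, "flushed cells".getBytes(StandardCharsets.UTF_8));

        // Step 2: "commit" by moving it into the store directory in one step,
        // so a reader either sees the finished file or does not see it at all.
        Path committed = infoDir.resolve(tmpFile.getFileName());
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        System.out.println("Committed " + committed);
    }
}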
2024-11-10T06:33:57,693 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 130ef2252ee598b19b70e01950118900: 2024-11-10T06:33:57,693 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900., storeName=130ef2252ee598b19b70e01950118900/info, priority=13, startTime=1731220437661; duration=0sec 2024-11-10T06:33:57,693 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:57,693 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 130ef2252ee598b19b70e01950118900:info 2024-11-10T06:33:58,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:33:58,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:59,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:33:59,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:33:59,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:59,685 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 130ef2252ee598b19b70e01950118900 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-10T06:33:59,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/d215b98efb764ccebae20fa3d0322372 is 1080, key is row0181/info:/1731220437663/Put/seqid=0 2024-11-10T06:33:59,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741866_1042 (size=19000) 2024-11-10T06:33:59,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741866_1042 (size=19000) 2024-11-10T06:33:59,696 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/d215b98efb764ccebae20fa3d0322372 2024-11-10T06:33:59,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/d215b98efb764ccebae20fa3d0322372 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/d215b98efb764ccebae20fa3d0322372 2024-11-10T06:33:59,707 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/d215b98efb764ccebae20fa3d0322372, entries=13, sequenceid=251, filesize=18.6 K 2024-11-10T06:33:59,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for 130ef2252ee598b19b70e01950118900 in 23ms, sequenceid=251, compaction requested=true 2024-11-10T06:33:59,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 130ef2252ee598b19b70e01950118900: 2024-11-10T06:33:59,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 130ef2252ee598b19b70e01950118900:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T06:33:59,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:59,708 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T06:33:59,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:59,709 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 130ef2252ee598b19b70e01950118900 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-10T06:33:59,709 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 158074 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T06:33:59,710 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1541): 130ef2252ee598b19b70e01950118900/info is initiating minor compaction (all files) 2024-11-10T06:33:59,710 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 130ef2252ee598b19b70e01950118900/info in TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900. 2024-11-10T06:33:59,710 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/5ccf0ee163e644929e538ec192fdd730, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/9eaa18e0b5cb4fd3856a14a12a840c81, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/d215b98efb764ccebae20fa3d0322372] into tmpdir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp, totalSize=154.4 K 2024-11-10T06:33:59,710 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5ccf0ee163e644929e538ec192fdd730, keycount=105, bloomtype=ROW, size=116.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1731220409313 2024-11-10T06:33:59,711 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9eaa18e0b5cb4fd3856a14a12a840c81, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1731220437640 2024-11-10T06:33:59,711 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting d215b98efb764ccebae20fa3d0322372, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731220437663 2024-11-10T06:33:59,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/710c94f005794489aa452378798e98a7 is 1080, key is row0194/info:/1731220439686/Put/seqid=0 2024-11-10T06:33:59,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741867_1043 (size=19010) 2024-11-10T06:33:59,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741867_1043 (size=19010) 2024-11-10T06:33:59,724 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=267 (bloomFilter=true), 
to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/710c94f005794489aa452378798e98a7 2024-11-10T06:33:59,727 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 130ef2252ee598b19b70e01950118900#info#compaction#81 average throughput is 33.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T06:33:59,728 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/fb6b92000dd84cf0b71f8d98cbde5b22 is 1080, key is row0062/info:/1731220409313/Put/seqid=0 2024-11-10T06:33:59,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/710c94f005794489aa452378798e98a7 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/710c94f005794489aa452378798e98a7 2024-11-10T06:33:59,736 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/710c94f005794489aa452378798e98a7, entries=13, sequenceid=267, filesize=18.6 K 2024-11-10T06:33:59,737 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=15.76 KB/16140 for 130ef2252ee598b19b70e01950118900 in 28ms, sequenceid=267, compaction requested=false 2024-11-10T06:33:59,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 130ef2252ee598b19b70e01950118900: 2024-11-10T06:33:59,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 130ef2252ee598b19b70e01950118900 2024-11-10T06:33:59,739 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 130ef2252ee598b19b70e01950118900 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-10T06:33:59,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741868_1044 (size=148409) 2024-11-10T06:33:59,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741868_1044 (size=148409) 2024-11-10T06:33:59,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/81883e73e6224010ab0aa4bbfcd591f9 is 1080, key is row0207/info:/1731220439710/Put/seqid=0 2024-11-10T06:33:59,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741869_1045 (size=22254) 2024-11-10T06:33:59,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34635 is added to blk_1073741869_1045 (size=22254) 2024-11-10T06:33:59,750 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/fb6b92000dd84cf0b71f8d98cbde5b22 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/fb6b92000dd84cf0b71f8d98cbde5b22 2024-11-10T06:33:59,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/81883e73e6224010ab0aa4bbfcd591f9 2024-11-10T06:33:59,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/81883e73e6224010ab0aa4bbfcd591f9 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/81883e73e6224010ab0aa4bbfcd591f9 2024-11-10T06:33:59,756 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 130ef2252ee598b19b70e01950118900/info of 130ef2252ee598b19b70e01950118900 into fb6b92000dd84cf0b71f8d98cbde5b22(size=144.9 K), total size for store is 163.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
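The recurring Close-WAL-Writer-0 warnings in this section all have the same shape: RecoverLeaseFSUtils invokes an isFileClosed-style method reflectively, the already-closed DFS client throws "java.io.IOException: Filesystem closed", and reflection surfaces that failure wrapped in an InvocationTargetException whose own message is null. That is why the log prints "InvocationTargetException: null" and only shows the real error in the Caused by block. The following self-contained sketch reproduces the wrapping with a stand-in method; it is illustrative only and does not call the Hadoop API.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectionWrappingSketch {
    // Stand-in for an isFileClosed-style call on a client that has already been closed.
    public boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed");
    }

    public static void main(String[] args) throws Exception {
        Method m = ReflectionWrappingSketch.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(new ReflectionWrappingSketch(), "/WALs/example.meta");
        } catch (InvocationTargetException e) {
            // The reflective wrapper carries no message of its own ("null" in the log);
            // the underlying IOException is only available as the cause.
            System.out.println("wrapper message: " + e.getMessage());
            System.out.println("real cause:      " + e.getCause());
        }
    }
}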
2024-11-10T06:33:59,756 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 130ef2252ee598b19b70e01950118900:
2024-11-10T06:33:59,756 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900., storeName=130ef2252ee598b19b70e01950118900/info, priority=13, startTime=1731220439708; duration=0sec
2024-11-10T06:33:59,756 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-10T06:33:59,756 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 130ef2252ee598b19b70e01950118900:info
2024-11-10T06:33:59,759 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/81883e73e6224010ab0aa4bbfcd591f9, entries=16, sequenceid=286, filesize=21.7 K
2024-11-10T06:33:59,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=2.10 KB/2152 for 130ef2252ee598b19b70e01950118900 in 22ms, sequenceid=286, compaction requested=true
2024-11-10T06:33:59,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 130ef2252ee598b19b70e01950118900:
2024-11-10T06:33:59,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 130ef2252ee598b19b70e01950118900:info, priority=-2147483648, current under compaction store size is 1
2024-11-10T06:33:59,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-10T06:33:59,760 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-10T06:33:59,761 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 189673 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-10T06:33:59,761 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1541): 130ef2252ee598b19b70e01950118900/info is initiating minor compaction (all files)
2024-11-10T06:33:59,761 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 130ef2252ee598b19b70e01950118900/info in TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.
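The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries above come from ratio-based minor-compaction selection. The sketch below shows only the core ratio test, namely that no single candidate may dwarf the combined size of the others; the real ExploringCompactionPolicy layers more rules on top (permutation search, min/max file counts, special handling when every file in the store is selected), so treat this as an illustration of the idea, not the policy itself. The sizes in main() are made up for the example and are not taken from this log.

import java.util.List;

public class RatioSelectionSketch {
    // True if every file is at most `ratio` times the combined size of the others,
    // the basic check behind ratio-based minor compaction selection.
    static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Hypothetical store file sizes in bytes.
        System.out.println(filesInRatio(List.of(30_000L, 25_000L, 20_000L), 1.2));  // true: comparable sizes
        System.out.println(filesInRatio(List.of(150_000L, 20_000L, 19_000L), 1.2)); // false: first file too large
    }
}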
2024-11-10T06:33:59,761 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/fb6b92000dd84cf0b71f8d98cbde5b22, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/710c94f005794489aa452378798e98a7, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/81883e73e6224010ab0aa4bbfcd591f9] into tmpdir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp, totalSize=185.2 K 2024-11-10T06:33:59,762 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting fb6b92000dd84cf0b71f8d98cbde5b22, keycount=132, bloomtype=ROW, size=144.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731220409313 2024-11-10T06:33:59,762 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 710c94f005794489aa452378798e98a7, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=267, earliestPutTs=1731220439686 2024-11-10T06:33:59,762 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 81883e73e6224010ab0aa4bbfcd591f9, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1731220439710 2024-11-10T06:33:59,774 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 130ef2252ee598b19b70e01950118900#info#compaction#83 average throughput is 55.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T06:33:59,774 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/d413470d562445759f62b1cdfd8477eb is 1080, key is row0062/info:/1731220409313/Put/seqid=0 2024-11-10T06:33:59,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741870_1046 (size=179807) 2024-11-10T06:33:59,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741870_1046 (size=179807) 2024-11-10T06:33:59,783 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/d413470d562445759f62b1cdfd8477eb as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/d413470d562445759f62b1cdfd8477eb 2024-11-10T06:33:59,788 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 130ef2252ee598b19b70e01950118900/info of 130ef2252ee598b19b70e01950118900 into d413470d562445759f62b1cdfd8477eb(size=175.6 K), total size for store is 175.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T06:33:59,788 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 130ef2252ee598b19b70e01950118900: 2024-11-10T06:33:59,788 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900., storeName=130ef2252ee598b19b70e01950118900/info, priority=13, startTime=1731220439760; duration=0sec 2024-11-10T06:33:59,788 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:33:59,788 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 130ef2252ee598b19b70e01950118900:info 2024-11-10T06:34:00,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:34:00,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:34:01,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:34:01,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:34:01,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 130ef2252ee598b19b70e01950118900 2024-11-10T06:34:01,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 130ef2252ee598b19b70e01950118900 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T06:34:01,755 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/64528111bc2e449983c1ab2dec796b23 is 1080, key is row0223/info:/1731220439739/Put/seqid=0 2024-11-10T06:34:01,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741871_1047 (size=12523) 2024-11-10T06:34:01,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741871_1047 (size=12523) 2024-11-10T06:34:01,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/64528111bc2e449983c1ab2dec796b23 2024-11-10T06:34:01,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/64528111bc2e449983c1ab2dec796b23 as 
hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/64528111bc2e449983c1ab2dec796b23 2024-11-10T06:34:01,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/64528111bc2e449983c1ab2dec796b23, entries=7, sequenceid=298, filesize=12.2 K 2024-11-10T06:34:01,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for 130ef2252ee598b19b70e01950118900 in 27ms, sequenceid=298, compaction requested=false 2024-11-10T06:34:01,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 130ef2252ee598b19b70e01950118900: 2024-11-10T06:34:01,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] regionserver.HRegion(8855): Flush requested on 130ef2252ee598b19b70e01950118900 2024-11-10T06:34:01,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 130ef2252ee598b19b70e01950118900 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-10T06:34:01,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/ad4838267b5f4ae19c4ad38b4bc42cd9 is 1080, key is row0230/info:/1731220441751/Put/seqid=0 2024-11-10T06:34:01,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741872_1048 (size=23333) 2024-11-10T06:34:01,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741872_1048 (size=23333) 2024-11-10T06:34:01,788 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/ad4838267b5f4ae19c4ad38b4bc42cd9 2024-11-10T06:34:01,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/ad4838267b5f4ae19c4ad38b4bc42cd9 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/ad4838267b5f4ae19c4ad38b4bc42cd9 2024-11-10T06:34:01,799 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/ad4838267b5f4ae19c4ad38b4bc42cd9, entries=17, sequenceid=318, filesize=22.8 K 2024-11-10T06:34:01,800 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=10.51 KB/10760 for 130ef2252ee598b19b70e01950118900 in 23ms, sequenceid=318, compaction requested=true 2024-11-10T06:34:01,800 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 130ef2252ee598b19b70e01950118900: 2024-11-10T06:34:01,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 130ef2252ee598b19b70e01950118900:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T06:34:01,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:34:01,800 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T06:34:01,801 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 215663 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T06:34:01,801 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1541): 130ef2252ee598b19b70e01950118900/info is initiating minor compaction (all files) 2024-11-10T06:34:01,801 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 130ef2252ee598b19b70e01950118900/info in TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900. 2024-11-10T06:34:01,801 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/d413470d562445759f62b1cdfd8477eb, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/64528111bc2e449983c1ab2dec796b23, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/ad4838267b5f4ae19c4ad38b4bc42cd9] into tmpdir=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp, totalSize=210.6 K 2024-11-10T06:34:01,802 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting d413470d562445759f62b1cdfd8477eb, keycount=161, bloomtype=ROW, size=175.6 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1731220409313 2024-11-10T06:34:01,802 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting 64528111bc2e449983c1ab2dec796b23, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1731220439739 2024-11-10T06:34:01,802 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] compactions.Compactor(225): Compacting ad4838267b5f4ae19c4ad38b4bc42cd9, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1731220441751 2024-11-10T06:34:01,815 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 130ef2252ee598b19b70e01950118900#info#compaction#86 average throughput is 63.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T06:34:01,816 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/94f43eb31eea4516b0c35d21f077827c is 1080, key is row0062/info:/1731220409313/Put/seqid=0 2024-11-10T06:34:01,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741873_1049 (size=205882) 2024-11-10T06:34:01,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741873_1049 (size=205882) 2024-11-10T06:34:01,824 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/94f43eb31eea4516b0c35d21f077827c as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/94f43eb31eea4516b0c35d21f077827c 2024-11-10T06:34:01,830 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 130ef2252ee598b19b70e01950118900/info of 130ef2252ee598b19b70e01950118900 into 94f43eb31eea4516b0c35d21f077827c(size=201.1 K), total size for store is 201.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T06:34:01,830 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 130ef2252ee598b19b70e01950118900: 2024-11-10T06:34:01,830 INFO [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900., storeName=130ef2252ee598b19b70e01950118900/info, priority=13, startTime=1731220441800; duration=0sec 2024-11-10T06:34:01,831 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T06:34:01,831 DEBUG [RS:0;4999977c7e1b:36039-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 130ef2252ee598b19b70e01950118900:info 2024-11-10T06:34:02,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:34:02,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:34:03,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:34:03,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:34:03,795 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-10T06:34:03,795 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C36039%2C1731220386197.1731220443795 2024-11-10T06:34:03,802 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:03,802 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:03,802 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:03,802 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:03,802 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:03,802 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/WALs/4999977c7e1b,36039,1731220386197/4999977c7e1b%2C36039%2C1731220386197.1731220386582 with entries=310, filesize=307.89 KB; new WAL /user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/WALs/4999977c7e1b,36039,1731220386197/4999977c7e1b%2C36039%2C1731220386197.1731220443795 2024-11-10T06:34:03,803 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34843:34843),(127.0.0.1/127.0.0.1:32853:32853)] 2024-11-10T06:34:03,803 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/WALs/4999977c7e1b,36039,1731220386197/4999977c7e1b%2C36039%2C1731220386197.1731220386582 is not closed yet, will try archiving it next time 2024-11-10T06:34:03,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741833_1009 (size=315283) 2024-11-10T06:34:03,804 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741833_1009 (size=315283) 2024-11-10T06:34:03,807 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-10T06:34:03,811 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/.tmp/info/1bb7a69cb7d24968bcddfdcf88892a5b is 193, key is TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900./info:regioninfo/1731220412325/Put/seqid=0 2024-11-10T06:34:03,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741875_1051 (size=6223) 2024-11-10T06:34:03,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741875_1051 (size=6223) 2024-11-10T06:34:03,819 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/.tmp/info/1bb7a69cb7d24968bcddfdcf88892a5b 2024-11-10T06:34:03,824 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/.tmp/info/1bb7a69cb7d24968bcddfdcf88892a5b as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/info/1bb7a69cb7d24968bcddfdcf88892a5b 2024-11-10T06:34:03,828 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/info/1bb7a69cb7d24968bcddfdcf88892a5b, entries=5, sequenceid=21, filesize=6.1 K 2024-11-10T06:34:03,829 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 22ms, sequenceid=21, compaction requested=false 2024-11-10T06:34:03,829 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-10T06:34:03,830 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 130ef2252ee598b19b70e01950118900 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-10T06:34:03,833 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/3d5f141302e54a8c9a9a0e7f39b79c07 is 1080, key is row0247/info:/1731220441778/Put/seqid=0 2024-11-10T06:34:03,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741876_1052 (size=15760) 2024-11-10T06:34:03,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741876_1052 (size=15760) 2024-11-10T06:34:03,838 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=332 (bloomFilter=true), 
to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/3d5f141302e54a8c9a9a0e7f39b79c07 2024-11-10T06:34:03,843 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/.tmp/info/3d5f141302e54a8c9a9a0e7f39b79c07 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/3d5f141302e54a8c9a9a0e7f39b79c07 2024-11-10T06:34:03,847 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/3d5f141302e54a8c9a9a0e7f39b79c07, entries=10, sequenceid=332, filesize=15.4 K 2024-11-10T06:34:03,849 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 130ef2252ee598b19b70e01950118900 in 20ms, sequenceid=332, compaction requested=false 2024-11-10T06:34:03,849 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 130ef2252ee598b19b70e01950118900: 2024-11-10T06:34:03,849 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for d2063cd689984f6265f1135a900cf2f0: 2024-11-10T06:34:03,849 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C36039%2C1731220386197.1731220443849 2024-11-10T06:34:03,854 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:03,854 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:03,854 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:03,854 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:03,854 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:03,854 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/WALs/4999977c7e1b,36039,1731220386197/4999977c7e1b%2C36039%2C1731220386197.1731220443795 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/WALs/4999977c7e1b,36039,1731220386197/4999977c7e1b%2C36039%2C1731220386197.1731220443849 2024-11-10T06:34:03,855 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34843:34843),(127.0.0.1/127.0.0.1:32853:32853)] 2024-11-10T06:34:03,855 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/WALs/4999977c7e1b,36039,1731220386197/4999977c7e1b%2C36039%2C1731220386197.1731220443795 is not closed yet, will try archiving it next time 2024-11-10T06:34:03,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741874_1050 (size=731) 2024-11-10T06:34:03,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741874_1050 (size=731) 2024-11-10T06:34:03,856 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/WALs/4999977c7e1b,36039,1731220386197/4999977c7e1b%2C36039%2C1731220386197.1731220386582 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/oldWALs/4999977c7e1b%2C36039%2C1731220386197.1731220386582 2024-11-10T06:34:03,856 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T06:34:03,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T06:34:03,857 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T06:34:03,857 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:34:03,857 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:34:03,857 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/WALs/4999977c7e1b,36039,1731220386197/4999977c7e1b%2C36039%2C1731220386197.1731220443795 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/oldWALs/4999977c7e1b%2C36039%2C1731220386197.1731220443795 2024-11-10T06:34:03,857 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:34:03,857 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T06:34:03,857 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-10T06:34:03,857 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1638350121, stopped=false 2024-11-10T06:34:03,857 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=4999977c7e1b,34781,1731220386149 2024-11-10T06:34:03,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T06:34:03,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T06:34:03,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:03,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:03,859 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T06:34:03,860 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
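
The call stack above bottoms out in AbstractTestLogRolling.tearDown(), which shuts the mini cluster down through HBaseTestingUtil.shutdownMiniCluster(). A minimal sketch of that setup/teardown pattern follows; the class name and field are illustrative placeholders, not the actual test source.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
  // Hypothetical scaffold mirroring the pattern the stack trace points at.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts in-process HDFS, ZooKeeper, a master and a region server.
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Closes the shared async connection and stops the cluster; this call is what
    // produces the "Connection has been closed" and "Shutting down minicluster" entries.
    testUtil.shutdownMiniCluster();
  }
}
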
2024-11-10T06:34:03,860 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:34:03,860 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:34:03,860 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:34:03,860 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '4999977c7e1b,36039,1731220386197' ***** 2024-11-10T06:34:03,860 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T06:34:03,861 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:34:03,861 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T06:34:03,861 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T06:34:03,861 INFO [RS:0;4999977c7e1b:36039 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T06:34:03,861 INFO [RS:0;4999977c7e1b:36039 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T06:34:03,861 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(3091): Received CLOSE for 130ef2252ee598b19b70e01950118900 2024-11-10T06:34:03,861 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(3091): Received CLOSE for d2063cd689984f6265f1135a900cf2f0 2024-11-10T06:34:03,861 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(959): stopping server 4999977c7e1b,36039,1731220386197 2024-11-10T06:34:03,861 INFO [RS:0;4999977c7e1b:36039 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T06:34:03,862 INFO [RS:0;4999977c7e1b:36039 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;4999977c7e1b:36039. 2024-11-10T06:34:03,862 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 130ef2252ee598b19b70e01950118900, disabling compactions & flushes 2024-11-10T06:34:03,862 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900. 
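
The WAL roll at 06:34:03,795-03,854 and the meta/region flushes just before the shutdown are server-side operations, but the same effects can be requested from a client through the Admin API. The sketch below is a hedged client-side equivalent, not the mechanism the test itself uses: only the table name is taken from the log; the connection setup and the way region servers are enumerated are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
      // Flush the table's memstores; each region reports a "Finished flush of dataSize ..." entry.
      admin.flush(table);
      // Roll the WAL on every live region server; each roll shows up as "Rolled WAL ... new WAL".
      for (ServerName rs : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
        admin.rollWALWriter(rs);
      }
    }
  }
}
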
2024-11-10T06:34:03,862 DEBUG [RS:0;4999977c7e1b:36039 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:34:03,862 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900. 2024-11-10T06:34:03,862 DEBUG [RS:0;4999977c7e1b:36039 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:34:03,862 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900. after waiting 0 ms 2024-11-10T06:34:03,862 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900. 2024-11-10T06:34:03,862 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T06:34:03,862 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T06:34:03,862 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
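
The minor compaction at 06:34:01,801 and the "Waiting for ... Compaction Thread to finish" lines above are the server-side view; from a client, a compaction can be requested and watched through the Admin API. This is a sketch under the assumption of a reachable cluster; the polling loop is illustrative and not something the test performs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
      // Ask for a compaction of the table; the region server queues it on the
      // short/long compaction pools seen in the thread names above.
      admin.compact(table);
      // Poll until the servers report no compaction in progress for this table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(100);
      }
    }
  }
}
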
2024-11-10T06:34:03,862 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T06:34:03,864 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-10T06:34:03,864 DEBUG [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 130ef2252ee598b19b70e01950118900=TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900., d2063cd689984f6265f1135a900cf2f0=TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0.} 2024-11-10T06:34:03,864 DEBUG [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(1351): Waiting on 130ef2252ee598b19b70e01950118900, 1588230740, d2063cd689984f6265f1135a900cf2f0 2024-11-10T06:34:03,864 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T06:34:03,864 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T06:34:03,864 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T06:34:03,864 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T06:34:03,864 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T06:34:03,864 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546->hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/0467660b560a4eff9b4abcc27d74c6ad-top, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-df2d699882c74a909e13c778dc4d5c03, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-c6dccb019abe4d5eaf1c036fd7599b3f, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/1cc71627f303476ebe854f7fcdb818d5, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-14f99e4d34bd487baa354975eaf6780e, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/bc6dc6048f2c4510b0a7b80fce144118, 
hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0fd2deac52274b4f9acd2608b15489ec, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/3a4c8d9ed6d044a9af102386778f11f6, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0ef7b7165c0d4922aa13f2fe20caa6a8, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/9e54a5fb755440c898a94311d591caa3, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/da32821a2a47425183f70b7686f8b0e1, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/04ac8c9434e640a593169b945ff4278a, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/5ccf0ee163e644929e538ec192fdd730, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/2835aa2261be45fe9b47d8a2a77107a0, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/9eaa18e0b5cb4fd3856a14a12a840c81, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/fb6b92000dd84cf0b71f8d98cbde5b22, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/d215b98efb764ccebae20fa3d0322372, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/710c94f005794489aa452378798e98a7, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/d413470d562445759f62b1cdfd8477eb, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/81883e73e6224010ab0aa4bbfcd591f9, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/64528111bc2e449983c1ab2dec796b23, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/ad4838267b5f4ae19c4ad38b4bc42cd9] to archive 2024-11-10T06:34:03,865 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
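
The "Archiving compacted files" step does not delete the store files listed above; it re-roots them from data/... to archive/... under the same test-data directory, as the entries that follow show. Below is a small sketch of that path mapping, purely for illustration; HBase's HFileArchiver performs the real move, including retries and directory creation.

import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  // Illustrative only: rebuild the archive location for a store file by re-rooting its
  // data/<namespace>/<table>/<region>/<family>/<hfile> suffix under <rootDir>/archive.
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String suffix = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length()); // "/data/default/<table>/<region>/<cf>/<file>"
    return new Path(rootDir, "archive" + suffix);
  }

  public static void main(String[] args) {
    Path root = new Path(
        "hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b");
    Path store = new Path(root,
        "data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/d413470d562445759f62b1cdfd8477eb");
    // Prints the .../archive/data/default/... location seen in the archival entries.
    System.out.println(toArchivePath(root, store));
  }
}
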
2024-11-10T06:34:03,867 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:34:03,868 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-df2d699882c74a909e13c778dc4d5c03 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-df2d699882c74a909e13c778dc4d5c03 2024-11-10T06:34:03,869 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-10T06:34:03,869 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T06:34:03,869 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T06:34:03,869 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731220443864Running coprocessor pre-close hooks at 1731220443864Disabling compacts and flushes for region at 1731220443864Disabling writes for close at 1731220443864Writing region close event to WAL at 1731220443865 (+1 ms)Running coprocessor post-close hooks at 1731220443869 (+4 ms)Closed at 1731220443869 2024-11-10T06:34:03,869 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T06:34:03,869 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-c6dccb019abe4d5eaf1c036fd7599b3f to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-c6dccb019abe4d5eaf1c036fd7599b3f 2024-11-10T06:34:03,871 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/1cc71627f303476ebe854f7fcdb818d5 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/1cc71627f303476ebe854f7fcdb818d5 2024-11-10T06:34:03,872 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-14f99e4d34bd487baa354975eaf6780e to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/TestLogRolling-testLogRolling=25b555b60cc2797a1050b91aa2df6546-14f99e4d34bd487baa354975eaf6780e 2024-11-10T06:34:03,873 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/bc6dc6048f2c4510b0a7b80fce144118 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/bc6dc6048f2c4510b0a7b80fce144118 2024-11-10T06:34:03,874 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0fd2deac52274b4f9acd2608b15489ec to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0fd2deac52274b4f9acd2608b15489ec 2024-11-10T06:34:03,875 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/3a4c8d9ed6d044a9af102386778f11f6 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/3a4c8d9ed6d044a9af102386778f11f6 2024-11-10T06:34:03,876 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0ef7b7165c0d4922aa13f2fe20caa6a8 to 
hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/0ef7b7165c0d4922aa13f2fe20caa6a8 2024-11-10T06:34:03,877 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/9e54a5fb755440c898a94311d591caa3 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/9e54a5fb755440c898a94311d591caa3 2024-11-10T06:34:03,878 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/da32821a2a47425183f70b7686f8b0e1 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/da32821a2a47425183f70b7686f8b0e1 2024-11-10T06:34:03,879 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/04ac8c9434e640a593169b945ff4278a to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/04ac8c9434e640a593169b945ff4278a 2024-11-10T06:34:03,880 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/5ccf0ee163e644929e538ec192fdd730 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/5ccf0ee163e644929e538ec192fdd730 2024-11-10T06:34:03,881 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/2835aa2261be45fe9b47d8a2a77107a0 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/2835aa2261be45fe9b47d8a2a77107a0 2024-11-10T06:34:03,882 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/9eaa18e0b5cb4fd3856a14a12a840c81 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/9eaa18e0b5cb4fd3856a14a12a840c81 2024-11-10T06:34:03,883 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/fb6b92000dd84cf0b71f8d98cbde5b22 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/fb6b92000dd84cf0b71f8d98cbde5b22 2024-11-10T06:34:03,884 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/d215b98efb764ccebae20fa3d0322372 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/d215b98efb764ccebae20fa3d0322372 2024-11-10T06:34:03,885 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/710c94f005794489aa452378798e98a7 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/710c94f005794489aa452378798e98a7 2024-11-10T06:34:03,886 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/d413470d562445759f62b1cdfd8477eb to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/d413470d562445759f62b1cdfd8477eb 2024-11-10T06:34:03,887 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/81883e73e6224010ab0aa4bbfcd591f9 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/81883e73e6224010ab0aa4bbfcd591f9 2024-11-10T06:34:03,888 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/64528111bc2e449983c1ab2dec796b23 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/64528111bc2e449983c1ab2dec796b23 2024-11-10T06:34:03,889 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/ad4838267b5f4ae19c4ad38b4bc42cd9 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/info/ad4838267b5f4ae19c4ad38b4bc42cd9 2024-11-10T06:34:03,889 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=4999977c7e1b:34781 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-10T06:34:03,889 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [1cc71627f303476ebe854f7fcdb818d5=43081, bc6dc6048f2c4510b0a7b80fce144118=12516, 0fd2deac52274b4f9acd2608b15489ec=68045, 3a4c8d9ed6d044a9af102386778f11f6=22238, 0ef7b7165c0d4922aa13f2fe20caa6a8=15750, 9e54a5fb755440c898a94311d591caa3=96252, da32821a2a47425183f70b7686f8b0e1=22238, 04ac8c9434e640a593169b945ff4278a=20078, 5ccf0ee163e644929e538ec192fdd730=118996, 2835aa2261be45fe9b47d8a2a77107a0=12516, 9eaa18e0b5cb4fd3856a14a12a840c81=20078, fb6b92000dd84cf0b71f8d98cbde5b22=148409, d215b98efb764ccebae20fa3d0322372=19000, 710c94f005794489aa452378798e98a7=19010, d413470d562445759f62b1cdfd8477eb=179807, 81883e73e6224010ab0aa4bbfcd591f9=22254, 64528111bc2e449983c1ab2dec796b23=12523, ad4838267b5f4ae19c4ad38b4bc42cd9=23333] 2024-11-10T06:34:03,893 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/130ef2252ee598b19b70e01950118900/recovered.edits/335.seqid, newMaxSeqId=335, maxSeqId=126 2024-11-10T06:34:03,893 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900. 2024-11-10T06:34:03,893 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 130ef2252ee598b19b70e01950118900: Waiting for close lock at 1731220443861Running coprocessor pre-close hooks at 1731220443861Disabling compacts and flushes for region at 1731220443861Disabling writes for close at 1731220443862 (+1 ms)Writing region close event to WAL at 1731220443890 (+28 ms)Running coprocessor post-close hooks at 1731220443893 (+3 ms)Closed at 1731220443893 2024-11-10T06:34:03,893 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731220411392.130ef2252ee598b19b70e01950118900. 2024-11-10T06:34:03,893 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d2063cd689984f6265f1135a900cf2f0, disabling compactions & flushes 2024-11-10T06:34:03,893 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0. 2024-11-10T06:34:03,893 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0. 2024-11-10T06:34:03,893 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0. after waiting 0 ms 2024-11-10T06:34:03,893 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0. 
2024-11-10T06:34:03,894 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/d2063cd689984f6265f1135a900cf2f0/info/0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546->hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/25b555b60cc2797a1050b91aa2df6546/info/0467660b560a4eff9b4abcc27d74c6ad-bottom] to archive 2024-11-10T06:34:03,895 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-10T06:34:03,896 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/d2063cd689984f6265f1135a900cf2f0/info/0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546 to hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/archive/data/default/TestLogRolling-testLogRolling/d2063cd689984f6265f1135a900cf2f0/info/0467660b560a4eff9b4abcc27d74c6ad.25b555b60cc2797a1050b91aa2df6546 2024-11-10T06:34:03,896 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-10T06:34:03,899 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/data/default/TestLogRolling-testLogRolling/d2063cd689984f6265f1135a900cf2f0/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-10T06:34:03,900 INFO [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0. 2024-11-10T06:34:03,900 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d2063cd689984f6265f1135a900cf2f0: Waiting for close lock at 1731220443893Running coprocessor pre-close hooks at 1731220443893Disabling compacts and flushes for region at 1731220443893Disabling writes for close at 1731220443893Writing region close event to WAL at 1731220443896 (+3 ms)Running coprocessor post-close hooks at 1731220443900 (+4 ms)Closed at 1731220443900 2024-11-10T06:34:03,900 DEBUG [RS_CLOSE_REGION-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731220411392.d2063cd689984f6265f1135a900cf2f0. 2024-11-10T06:34:04,064 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(976): stopping server 4999977c7e1b,36039,1731220386197; all regions closed. 
2024-11-10T06:34:04,065 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:04,065 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:04,065 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:04,065 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:04,065 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:04,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741834_1010 (size=8107) 2024-11-10T06:34:04,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741834_1010 (size=8107) 2024-11-10T06:34:04,069 DEBUG [RS:0;4999977c7e1b:36039 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/oldWALs 2024-11-10T06:34:04,069 INFO [RS:0;4999977c7e1b:36039 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4999977c7e1b%2C36039%2C1731220386197.meta:.meta(num 1731220386952) 2024-11-10T06:34:04,070 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:04,070 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:04,070 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:04,070 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:04,070 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:04,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741877_1053 (size=780) 2024-11-10T06:34:04,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741877_1053 (size=780) 2024-11-10T06:34:04,073 DEBUG [RS:0;4999977c7e1b:36039 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/oldWALs 2024-11-10T06:34:04,073 INFO [RS:0;4999977c7e1b:36039 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4999977c7e1b%2C36039%2C1731220386197:(num 1731220443849) 2024-11-10T06:34:04,073 DEBUG [RS:0;4999977c7e1b:36039 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:34:04,073 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T06:34:04,074 INFO [RS:0;4999977c7e1b:36039 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T06:34:04,074 INFO [RS:0;4999977c7e1b:36039 {}] hbase.ChoreService(370): Chore service for: regionserver/4999977c7e1b:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T06:34:04,074 INFO [RS:0;4999977c7e1b:36039 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T06:34:04,074 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T06:34:04,074 INFO [RS:0;4999977c7e1b:36039 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36039 2024-11-10T06:34:04,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4999977c7e1b,36039,1731220386197 2024-11-10T06:34:04,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T06:34:04,076 INFO [RS:0;4999977c7e1b:36039 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T06:34:04,077 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4999977c7e1b,36039,1731220386197] 2024-11-10T06:34:04,078 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4999977c7e1b,36039,1731220386197 already deleted, retry=false 2024-11-10T06:34:04,078 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4999977c7e1b,36039,1731220386197 expired; onlineServers=0 2024-11-10T06:34:04,078 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '4999977c7e1b,34781,1731220386149' ***** 2024-11-10T06:34:04,078 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T06:34:04,079 INFO [M:0;4999977c7e1b:34781 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T06:34:04,079 INFO [M:0;4999977c7e1b:34781 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T06:34:04,079 DEBUG [M:0;4999977c7e1b:34781 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T06:34:04,079 DEBUG [M:0;4999977c7e1b:34781 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T06:34:04,079 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-10T06:34:04,079 DEBUG [master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220386348 {}] cleaner.HFileCleaner(306): Exit Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220386348,5,FailOnTimeoutGroup] 2024-11-10T06:34:04,079 DEBUG [master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220386349 {}] cleaner.HFileCleaner(306): Exit Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220386349,5,FailOnTimeoutGroup] 2024-11-10T06:34:04,079 INFO [M:0;4999977c7e1b:34781 {}] hbase.ChoreService(370): Chore service for: master/4999977c7e1b:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T06:34:04,079 INFO [M:0;4999977c7e1b:34781 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T06:34:04,079 DEBUG [M:0;4999977c7e1b:34781 {}] master.HMaster(1795): Stopping service threads 2024-11-10T06:34:04,079 INFO [M:0;4999977c7e1b:34781 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T06:34:04,079 INFO [M:0;4999977c7e1b:34781 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T06:34:04,079 INFO [M:0;4999977c7e1b:34781 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T06:34:04,080 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T06:34:04,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T06:34:04,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:04,080 DEBUG [M:0;4999977c7e1b:34781 {}] zookeeper.ZKUtil(347): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T06:34:04,080 WARN [M:0;4999977c7e1b:34781 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T06:34:04,081 INFO [M:0;4999977c7e1b:34781 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/.lastflushedseqids 2024-11-10T06:34:04,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741878_1054 (size=228) 2024-11-10T06:34:04,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741878_1054 (size=228) 2024-11-10T06:34:04,086 INFO [M:0;4999977c7e1b:34781 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T06:34:04,086 INFO [M:0;4999977c7e1b:34781 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T06:34:04,087 DEBUG [M:0;4999977c7e1b:34781 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T06:34:04,087 INFO [M:0;4999977c7e1b:34781 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:34:04,087 DEBUG [M:0;4999977c7e1b:34781 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:34:04,087 DEBUG [M:0;4999977c7e1b:34781 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T06:34:04,087 DEBUG [M:0;4999977c7e1b:34781 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:34:04,087 INFO [M:0;4999977c7e1b:34781 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-10T06:34:04,103 DEBUG [M:0;4999977c7e1b:34781 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0bf5fcc81a3b493b8925e5d11eec62b1 is 82, key is hbase:meta,,1/info:regioninfo/1731220386974/Put/seqid=0 2024-11-10T06:34:04,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741879_1055 (size=5672) 2024-11-10T06:34:04,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741879_1055 (size=5672) 2024-11-10T06:34:04,108 INFO [M:0;4999977c7e1b:34781 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0bf5fcc81a3b493b8925e5d11eec62b1 2024-11-10T06:34:04,126 DEBUG [M:0;4999977c7e1b:34781 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1defbc22a14f465694055ea1bb7881b2 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731220387389/Put/seqid=0 2024-11-10T06:34:04,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741880_1056 (size=7090) 2024-11-10T06:34:04,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741880_1056 (size=7090) 2024-11-10T06:34:04,131 INFO [M:0;4999977c7e1b:34781 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1defbc22a14f465694055ea1bb7881b2 2024-11-10T06:34:04,135 INFO [M:0;4999977c7e1b:34781 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1defbc22a14f465694055ea1bb7881b2 2024-11-10T06:34:04,150 DEBUG [M:0;4999977c7e1b:34781 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2ad3aa308e024671b1eefd3cc7b64f69 is 69, key is 4999977c7e1b,36039,1731220386197/rs:state/1731220386433/Put/seqid=0 
2024-11-10T06:34:04,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741881_1057 (size=5156) 2024-11-10T06:34:04,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741881_1057 (size=5156) 2024-11-10T06:34:04,155 INFO [M:0;4999977c7e1b:34781 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2ad3aa308e024671b1eefd3cc7b64f69 2024-11-10T06:34:04,173 DEBUG [M:0;4999977c7e1b:34781 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/26b08f72ca96441383f39dac1514d4f8 is 52, key is load_balancer_on/state:d/1731220387021/Put/seqid=0 2024-11-10T06:34:04,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741882_1058 (size=5056) 2024-11-10T06:34:04,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:34:04,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36039-0x10190e226bf0001, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:34:04,178 INFO [RS:0;4999977c7e1b:36039 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T06:34:04,178 INFO [RS:0;4999977c7e1b:36039 {}] regionserver.HRegionServer(1031): Exiting; stopping=4999977c7e1b,36039,1731220386197; zookeeper connection closed. 
2024-11-10T06:34:04,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741882_1058 (size=5056) 2024-11-10T06:34:04,178 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3668ab6b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3668ab6b 2024-11-10T06:34:04,178 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-10T06:34:04,178 INFO [M:0;4999977c7e1b:34781 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/26b08f72ca96441383f39dac1514d4f8 2024-11-10T06:34:04,183 DEBUG [M:0;4999977c7e1b:34781 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0bf5fcc81a3b493b8925e5d11eec62b1 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0bf5fcc81a3b493b8925e5d11eec62b1 2024-11-10T06:34:04,187 INFO [M:0;4999977c7e1b:34781 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0bf5fcc81a3b493b8925e5d11eec62b1, entries=8, sequenceid=125, filesize=5.5 K 2024-11-10T06:34:04,188 DEBUG [M:0;4999977c7e1b:34781 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1defbc22a14f465694055ea1bb7881b2 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1defbc22a14f465694055ea1bb7881b2 2024-11-10T06:34:04,192 INFO [M:0;4999977c7e1b:34781 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1defbc22a14f465694055ea1bb7881b2 2024-11-10T06:34:04,193 INFO [M:0;4999977c7e1b:34781 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1defbc22a14f465694055ea1bb7881b2, entries=13, sequenceid=125, filesize=6.9 K 2024-11-10T06:34:04,193 DEBUG [M:0;4999977c7e1b:34781 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2ad3aa308e024671b1eefd3cc7b64f69 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2ad3aa308e024671b1eefd3cc7b64f69 2024-11-10T06:34:04,197 INFO [M:0;4999977c7e1b:34781 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2ad3aa308e024671b1eefd3cc7b64f69, entries=1, sequenceid=125, filesize=5.0 K 2024-11-10T06:34:04,198 DEBUG 
[M:0;4999977c7e1b:34781 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/26b08f72ca96441383f39dac1514d4f8 as hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/26b08f72ca96441383f39dac1514d4f8 2024-11-10T06:34:04,202 INFO [M:0;4999977c7e1b:34781 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35403/user/jenkins/test-data/a1ef7709-66b8-2099-a668-c80df89a800b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/26b08f72ca96441383f39dac1514d4f8, entries=1, sequenceid=125, filesize=4.9 K 2024-11-10T06:34:04,202 INFO [M:0;4999977c7e1b:34781 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=125, compaction requested=false 2024-11-10T06:34:04,204 INFO [M:0;4999977c7e1b:34781 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:34:04,204 DEBUG [M:0;4999977c7e1b:34781 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731220444086Disabling compacts and flushes for region at 1731220444086Disabling writes for close at 1731220444087 (+1 ms)Obtaining lock to block concurrent updates at 1731220444087Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731220444087Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1731220444087Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731220444088 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731220444088Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731220444103 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731220444103Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731220444112 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731220444126 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731220444126Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731220444135 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731220444149 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731220444149Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731220444159 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731220444172 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731220444172Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69f369ec: reopening flushed file at 1731220444182 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19638fab: reopening flushed file at 1731220444187 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f348025: reopening flushed file at 1731220444193 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2999d960: reopening flushed file at 1731220444197 (+4 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=125, compaction requested=false at 1731220444202 (+5 ms)Writing region close event to WAL at 1731220444204 (+2 ms)Closed at 1731220444204 2024-11-10T06:34:04,204 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:04,205 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:04,205 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:04,205 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:04,205 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:04,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741830_1006 (size=61320) 2024-11-10T06:34:04,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45029 is added to blk_1073741830_1006 (size=61320) 2024-11-10T06:34:04,207 INFO [M:0;4999977c7e1b:34781 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-10T06:34:04,207 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T06:34:04,207 INFO [M:0;4999977c7e1b:34781 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34781 2024-11-10T06:34:04,208 INFO [M:0;4999977c7e1b:34781 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T06:34:04,309 INFO [M:0;4999977c7e1b:34781 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T06:34:04,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:34:04,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34781-0x10190e226bf0000, quorum=127.0.0.1:55342, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T06:34:04,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2bf4ff10{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:34:04,313 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6ccca08b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:34:04,313 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:34:04,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57641b80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:34:04,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a96e527{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/hadoop.log.dir/,STOPPED} 2024-11-10T06:34:04,315 WARN [BP-377058856-172.17.0.2-1731220385492 heartbeating to localhost/127.0.0.1:35403 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:34:04,315 WARN [BP-377058856-172.17.0.2-1731220385492 heartbeating to localhost/127.0.0.1:35403 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-377058856-172.17.0.2-1731220385492 (Datanode Uuid ab94b412-564b-4790-bf2f-58b43a9ddb36) service to localhost/127.0.0.1:35403 2024-11-10T06:34:04,315 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T06:34:04,315 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:34:04,315 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/cluster_ef24c530-3e5a-da26-5472-37cd2ee79f54/data/data3/current/BP-377058856-172.17.0.2-1731220385492 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:34:04,316 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/cluster_ef24c530-3e5a-da26-5472-37cd2ee79f54/data/data4/current/BP-377058856-172.17.0.2-1731220385492 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:34:04,316 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:34:04,318 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c013a6e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:34:04,318 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1798c27d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:34:04,318 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:34:04,318 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41b0b61{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:34:04,318 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42424a91{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/hadoop.log.dir/,STOPPED} 2024-11-10T06:34:04,320 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T06:34:04,320 WARN [BP-377058856-172.17.0.2-1731220385492 heartbeating to localhost/127.0.0.1:35403 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T06:34:04,320 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T06:34:04,320 WARN [BP-377058856-172.17.0.2-1731220385492 heartbeating to localhost/127.0.0.1:35403 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-377058856-172.17.0.2-1731220385492 (Datanode Uuid e2ce63fb-ae1d-4b2f-89cb-747b93b25985) service to localhost/127.0.0.1:35403 2024-11-10T06:34:04,320 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/cluster_ef24c530-3e5a-da26-5472-37cd2ee79f54/data/data1/current/BP-377058856-172.17.0.2-1731220385492 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:34:04,321 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/cluster_ef24c530-3e5a-da26-5472-37cd2ee79f54/data/data2/current/BP-377058856-172.17.0.2-1731220385492 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T06:34:04,321 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T06:34:04,327 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@34478999{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T06:34:04,328 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ffb9dd1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T06:34:04,328 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T06:34:04,328 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13bc5e1d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T06:34:04,328 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f89d121{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/hadoop.log.dir/,STOPPED} 2024-11-10T06:34:04,335 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T06:34:04,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T06:34:04,373 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 205) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35403 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35403 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35403 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:35403 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35403 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:35403 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:35403 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:35403 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=512 (was 485) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=15 (was 40), ProcessCount=11 (was 11), AvailableMemoryMB=6948 (was 6992) 2024-11-10T06:34:04,381 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=15, ProcessCount=11, AvailableMemoryMB=6948 2024-11-10T06:34:04,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T06:34:04,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/hadoop.log.dir so I do NOT create it in target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07 2024-11-10T06:34:04,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/579b6725-10db-1652-b2fb-64f330c8e350/hadoop.tmp.dir so I do NOT create it in target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07 2024-11-10T06:34:04,381 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/cluster_55264451-bed9-b2e4-0fb1-ec85e39516d9, deleteOnExit=true 2024-11-10T06:34:04,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-10T06:34:04,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/test.cache.data in system properties and HBase conf 2024-11-10T06:34:04,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/hadoop.log.dir in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T06:34:04,382 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/nfs.dump.dir in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/java.io.tmpdir in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T06:34:04,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T06:34:04,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T06:34:04,395 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T06:34:04,450 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:34:04,450 INFO [regionserver/4999977c7e1b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T06:34:04,453 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:34:04,454 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:34:04,454 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:34:04,454 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T06:34:04,455 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:34:04,455 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10471e56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:34:04,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3364e6da{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:34:04,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:34:04,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:34:04,569 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10602cad{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/java.io.tmpdir/jetty-localhost-37633-hadoop-hdfs-3_4_1-tests_jar-_-any-12977864812224527710/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T06:34:04,569 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3ae4e29{HTTP/1.1, (http/1.1)}{localhost:37633} 2024-11-10T06:34:04,569 INFO [Time-limited test {}] server.Server(415): Started @294270ms 2024-11-10T06:34:04,582 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T06:34:04,632 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:34:04,634 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:34:04,635 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:34:04,635 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:34:04,635 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T06:34:04,636 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5d1dcdb8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:34:04,636 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@525207a7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:34:04,749 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@280c54a4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/java.io.tmpdir/jetty-localhost-40723-hadoop-hdfs-3_4_1-tests_jar-_-any-9114291364710557990/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:34:04,749 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2fba4655{HTTP/1.1, (http/1.1)}{localhost:40723} 2024-11-10T06:34:04,749 INFO [Time-limited test {}] server.Server(415): Started @294450ms 2024-11-10T06:34:04,750 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T06:34:04,778 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T06:34:04,780 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T06:34:04,781 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T06:34:04,781 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T06:34:04,781 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T06:34:04,781 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4519d7f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/hadoop.log.dir/,AVAILABLE} 2024-11-10T06:34:04,781 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@787f2444{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T06:34:04,844 WARN [Thread-2466 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/cluster_55264451-bed9-b2e4-0fb1-ec85e39516d9/data/data1/current/BP-260444421-172.17.0.2-1731220444401/current, will proceed with Du for space computation calculation, 2024-11-10T06:34:04,844 WARN [Thread-2467 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/cluster_55264451-bed9-b2e4-0fb1-ec85e39516d9/data/data2/current/BP-260444421-172.17.0.2-1731220444401/current, will proceed with Du for space computation calculation, 2024-11-10T06:34:04,860 WARN [Thread-2445 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T06:34:04,863 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64987e614094e062 with lease ID 0xb978b39ecd4d1aa4: Processing first storage report for DS-171c94a0-4dfa-416a-bb50-afed8c6c3c75 from datanode DatanodeRegistration(127.0.0.1:44529, datanodeUuid=7535dde5-f1e3-44af-8787-1490bfa5ef54, infoPort=34841, infoSecurePort=0, ipcPort=34901, storageInfo=lv=-57;cid=testClusterID;nsid=491404720;c=1731220444401) 2024-11-10T06:34:04,863 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64987e614094e062 with lease ID 0xb978b39ecd4d1aa4: from storage DS-171c94a0-4dfa-416a-bb50-afed8c6c3c75 node DatanodeRegistration(127.0.0.1:44529, datanodeUuid=7535dde5-f1e3-44af-8787-1490bfa5ef54, infoPort=34841, infoSecurePort=0, ipcPort=34901, storageInfo=lv=-57;cid=testClusterID;nsid=491404720;c=1731220444401), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:34:04,863 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64987e614094e062 with lease ID 0xb978b39ecd4d1aa4: Processing first storage report for DS-ea79732b-2621-4ee0-af6a-baecf355cdb6 from datanode DatanodeRegistration(127.0.0.1:44529, datanodeUuid=7535dde5-f1e3-44af-8787-1490bfa5ef54, infoPort=34841, infoSecurePort=0, ipcPort=34901, storageInfo=lv=-57;cid=testClusterID;nsid=491404720;c=1731220444401) 2024-11-10T06:34:04,863 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64987e614094e062 with lease ID 0xb978b39ecd4d1aa4: from storage DS-ea79732b-2621-4ee0-af6a-baecf355cdb6 node DatanodeRegistration(127.0.0.1:44529, datanodeUuid=7535dde5-f1e3-44af-8787-1490bfa5ef54, infoPort=34841, infoSecurePort=0, ipcPort=34901, storageInfo=lv=-57;cid=testClusterID;nsid=491404720;c=1731220444401), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:34:04,897 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@36271130{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/java.io.tmpdir/jetty-localhost-41465-hadoop-hdfs-3_4_1-tests_jar-_-any-736151463982370484/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T06:34:04,898 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f8244c4{HTTP/1.1, (http/1.1)}{localhost:41465} 2024-11-10T06:34:04,898 INFO [Time-limited test {}] server.Server(415): Started @294599ms 2024-11-10T06:34:04,899 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
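For reference, the mini-cluster bring-up recorded above (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}, followed by the DFS datanode Jetty endpoints and block reports) is what the test harness produces from a few lines of setup code. The sketch below is a minimal illustration only, written as a plain main method rather than the actual TestLogRolling JUnit source, and it assumes the HBaseTestingUtil / StartMiniClusterOption builder methods match the field names printed in the option's toString.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Test utility that owns the temporary data dirs seen in the log
        // (test.cache.data, hadoop.tmp.dir, hadoop.log.dir, ...).
        HBaseTestingUtil util = new HBaseTestingUtil();

        // Mirrors the option toString in the log: 1 master, 1 region server,
        // 2 datanodes, 1 ZooKeeper server. Builder method names are assumed
        // to match the printed field names.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();

        util.startMiniCluster(option);   // starts DFS, ZooKeeper and HBase in-process
        try {
          // ... WAL-rolling assertions would run here ...
        } finally {
          util.shutdownMiniCluster();    // tears everything down and deletes test dirs
        }
      }
    }

Shutting the utility down at the end of each test is what eventually produces the ResourceChecker "after:" summary and the hanging-thread dump shown at the top of this section.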
2024-11-10T06:34:04,984 WARN [Thread-2492 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/cluster_55264451-bed9-b2e4-0fb1-ec85e39516d9/data/data3/current/BP-260444421-172.17.0.2-1731220444401/current, will proceed with Du for space computation calculation, 2024-11-10T06:34:04,984 WARN [Thread-2493 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/cluster_55264451-bed9-b2e4-0fb1-ec85e39516d9/data/data4/current/BP-260444421-172.17.0.2-1731220444401/current, will proceed with Du for space computation calculation, 2024-11-10T06:34:05,000 WARN [Thread-2481 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T06:34:05,002 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8393bafa2f73eb82 with lease ID 0xb978b39ecd4d1aa5: Processing first storage report for DS-e29e1e7b-0f87-4d55-b16d-af205e65b23d from datanode DatanodeRegistration(127.0.0.1:37173, datanodeUuid=10d06763-585f-4d42-9665-1a9078eeefd8, infoPort=42145, infoSecurePort=0, ipcPort=40865, storageInfo=lv=-57;cid=testClusterID;nsid=491404720;c=1731220444401) 2024-11-10T06:34:05,002 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8393bafa2f73eb82 with lease ID 0xb978b39ecd4d1aa5: from storage DS-e29e1e7b-0f87-4d55-b16d-af205e65b23d node DatanodeRegistration(127.0.0.1:37173, datanodeUuid=10d06763-585f-4d42-9665-1a9078eeefd8, infoPort=42145, infoSecurePort=0, ipcPort=40865, storageInfo=lv=-57;cid=testClusterID;nsid=491404720;c=1731220444401), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:34:05,002 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8393bafa2f73eb82 with lease ID 0xb978b39ecd4d1aa5: Processing first storage report for DS-dbb1e062-55c5-4618-b9bb-7bcb07ec57b5 from datanode DatanodeRegistration(127.0.0.1:37173, datanodeUuid=10d06763-585f-4d42-9665-1a9078eeefd8, infoPort=42145, infoSecurePort=0, ipcPort=40865, storageInfo=lv=-57;cid=testClusterID;nsid=491404720;c=1731220444401) 2024-11-10T06:34:05,002 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8393bafa2f73eb82 with lease ID 0xb978b39ecd4d1aa5: from storage DS-dbb1e062-55c5-4618-b9bb-7bcb07ec57b5 node DatanodeRegistration(127.0.0.1:37173, datanodeUuid=10d06763-585f-4d42-9665-1a9078eeefd8, infoPort=42145, infoSecurePort=0, ipcPort=40865, storageInfo=lv=-57;cid=testClusterID;nsid=491404720;c=1731220444401), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T06:34:05,020 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07 2024-11-10T06:34:05,023 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/cluster_55264451-bed9-b2e4-0fb1-ec85e39516d9/zookeeper_0, clientPort=49457, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/cluster_55264451-bed9-b2e4-0fb1-ec85e39516d9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/cluster_55264451-bed9-b2e4-0fb1-ec85e39516d9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T06:34:05,023 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49457 2024-11-10T06:34:05,024 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:34:05,025 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:34:05,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741825_1001 (size=7) 2024-11-10T06:34:05,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741825_1001 (size=7) 2024-11-10T06:34:05,034 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e with version=8 2024-11-10T06:34:05,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37837/user/jenkins/test-data/ccdf7007-9cb0-6f54-f60c-5bcc4497a857/hbase-staging 2024-11-10T06:34:05,036 INFO [Time-limited test {}] client.ConnectionUtils(128): master/4999977c7e1b:0 server-side Connection retries=45 2024-11-10T06:34:05,036 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:34:05,036 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T06:34:05,036 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T06:34:05,036 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:34:05,036 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T06:34:05,036 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T06:34:05,036 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T06:34:05,040 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40861 2024-11-10T06:34:05,041 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40861 connecting to ZooKeeper ensemble=127.0.0.1:49457 2024-11-10T06:34:05,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:408610x0, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T06:34:05,047 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40861-0x10190e30cc60000 connected 2024-11-10T06:34:05,062 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:34:05,063 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:34:05,065 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:34:05,065 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e, hbase.cluster.distributed=false 2024-11-10T06:34:05,067 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T06:34:05,067 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40861 2024-11-10T06:34:05,067 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40861 2024-11-10T06:34:05,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40861 2024-11-10T06:34:05,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40861 2024-11-10T06:34:05,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40861 2024-11-10T06:34:05,083 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/4999977c7e1b:0 server-side Connection retries=45 2024-11-10T06:34:05,083 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:34:05,083 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T06:34:05,083 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T06:34:05,083 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T06:34:05,083 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T06:34:05,083 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T06:34:05,083 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T06:34:05,084 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33787 2024-11-10T06:34:05,085 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33787 connecting to ZooKeeper ensemble=127.0.0.1:49457 2024-11-10T06:34:05,085 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:34:05,087 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:34:05,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:337870x0, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T06:34:05,091 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:34:05,091 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33787-0x10190e30cc60001 connected 2024-11-10T06:34:05,092 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T06:34:05,092 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T06:34:05,093 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T06:34:05,094 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T06:34:05,094 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33787 2024-11-10T06:34:05,094 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33787 2024-11-10T06:34:05,096 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33787 2024-11-10T06:34:05,096 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33787 2024-11-10T06:34:05,099 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33787 
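The NettyRpcServer bind and RpcExecutor lines above are the server side of the endpoints a client reaches through the ZooKeeper ensemble on 127.0.0.1:49457. A minimal client-side sketch using the standard HBase client API (ConnectionFactory, Admin) follows; the quorum address and client port are taken from this particular run's log and would differ in any other environment.

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class ClientConnectSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Ensemble and client port (49457) copied from the MiniZooKeeperCluster
        // line in this run's log; placeholders outside that test run.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "49457");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Calls go through the same NettyRpcServer endpoints the log shows being bound.
          List<TableDescriptor> tables = admin.listTableDescriptors();
          System.out.println("tables visible to the client: " + tables.size());
        }
      }
    }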
2024-11-10T06:34:05,111 DEBUG [M:0;4999977c7e1b:40861 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;4999977c7e1b:40861 2024-11-10T06:34:05,112 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/4999977c7e1b,40861,1731220445036 2024-11-10T06:34:05,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:34:05,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:34:05,114 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/4999977c7e1b,40861,1731220445036 2024-11-10T06:34:05,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T06:34:05,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:05,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:05,116 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T06:34:05,117 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/4999977c7e1b,40861,1731220445036 from backup master directory 2024-11-10T06:34:05,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/4999977c7e1b,40861,1731220445036 2024-11-10T06:34:05,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:34:05,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T06:34:05,118 WARN [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
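The ZKWatcher/ZKUtil lines above show the znode-watch pattern the master and region server use for /hbase/master and /hbase/backup-masters: set a watch even if the znode does not exist yet, then react to NodeCreated, NodeDeleted and NodeChildrenChanged events. Below is a minimal sketch of that pattern with the plain ZooKeeper client rather than HBase's internal ZKWatcher; the ensemble address is the one from this run and is otherwise an assumption.

    import java.util.concurrent.CountDownLatch;

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);

        // Watcher that just prints events, the way the ZKWatcher debug lines
        // above report NodeCreated / NodeDeleted / NodeChildrenChanged.
        Watcher watcher = (WatchedEvent event) -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
          System.out.println("event " + event.getType() + " on " + event.getPath());
        };

        // Ensemble address taken from this run's log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49457", 30_000, watcher);
        connected.await();

        // Set a watch on a znode that may not exist yet ("/hbase/master"),
        // which is what the "Set watcher on znode that does not yet exist"
        // debug lines describe.
        Stat stat = zk.exists("/hbase/master", true);
        System.out.println("/hbase/master " + (stat == null ? "absent" : "present"));

        zk.close();
      }
    }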
2024-11-10T06:34:05,118 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=4999977c7e1b,40861,1731220445036 2024-11-10T06:34:05,122 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/hbase.id] with ID: c663742e-44e5-46a0-80a1-d04cf4203e8c 2024-11-10T06:34:05,122 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/.tmp/hbase.id 2024-11-10T06:34:05,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741826_1002 (size=42) 2024-11-10T06:34:05,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741826_1002 (size=42) 2024-11-10T06:34:05,127 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/.tmp/hbase.id]:[hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/hbase.id] 2024-11-10T06:34:05,137 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:34:05,137 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T06:34:05,138 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
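The FSUtils lines above write the cluster ID to .tmp/hbase.id and then move it to its final location, a write-then-rename pattern that prevents readers from ever observing a partially written file. A minimal sketch of the same pattern with the Hadoop FileSystem API follows; the namenode address and directory layout are copied from this run's log and are otherwise assumptions.

    import java.net.URI;
    import java.nio.charset.StandardCharsets;
    import java.util.UUID;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Namenode port (46027) and root dir copied from this run's log.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46027"), conf);

        Path rootDir = new Path("/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e");
        Path tmpId = new Path(rootDir, ".tmp/hbase.id");
        Path finalId = new Path(rootDir, "hbase.id");

        // 1) write the ID to a temporary location ...
        try (FSDataOutputStream out = fs.create(tmpId, true)) {
          out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
        }
        // 2) ... then move it into place atomically from the reader's point of view.
        if (!fs.rename(tmpId, finalId)) {
          throw new java.io.IOException("rename failed: " + tmpId + " -> " + finalId);
        }
      }
    }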
2024-11-10T06:34:05,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:05,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:05,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741827_1003 (size=196) 2024-11-10T06:34:05,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741827_1003 (size=196) 2024-11-10T06:34:05,146 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T06:34:05,146 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T06:34:05,147 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:34:05,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741828_1004 (size=1189) 2024-11-10T06:34:05,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741828_1004 (size=1189) 2024-11-10T06:34:05,153 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store 2024-11-10T06:34:05,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741829_1005 (size=34) 2024-11-10T06:34:05,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741829_1005 (size=34) 2024-11-10T06:34:05,159 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:34:05,159 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T06:34:05,159 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:34:05,159 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:34:05,159 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T06:34:05,159 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T06:34:05,159 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
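The 'master:store' descriptor printed above (families info, proc, rs and state with their VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE attributes) is built internally by MasterRegion, but an equivalent descriptor can be expressed with the public client API. The following is a hedged sketch using TableDescriptorBuilder and ColumnFamilyDescriptorBuilder, with a stand-in table name since 'master:store' is an internal system table.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        // 'info' family roughly as printed above: 3 versions, in-memory,
        // ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();

        // 'proc', 'rs' and 'state' share the settings shown in the log:
        // 1 version, ROW bloom filter, no encoding, 64 KB blocks.
        ColumnFamilyDescriptor proc = defaultFamily("proc");
        ColumnFamilyDescriptor rs = defaultFamily("rs");
        ColumnFamilyDescriptor state = defaultFamily("state");

        TableDescriptor descriptor = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("store_demo"))   // stand-in name, not master:store
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .setColumnFamily(rs)
            .setColumnFamily(state)
            .build();

        System.out.println(descriptor);
      }

      private static ColumnFamilyDescriptor defaultFamily(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();
      }
    }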
2024-11-10T06:34:05,159 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731220445159Disabling compacts and flushes for region at 1731220445159Disabling writes for close at 1731220445159Writing region close event to WAL at 1731220445159Closed at 1731220445159 2024-11-10T06:34:05,159 WARN [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/.initializing 2024-11-10T06:34:05,159 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/WALs/4999977c7e1b,40861,1731220445036 2024-11-10T06:34:05,162 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C40861%2C1731220445036, suffix=, logDir=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/WALs/4999977c7e1b,40861,1731220445036, archiveDir=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/oldWALs, maxLogs=10 2024-11-10T06:34:05,162 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C40861%2C1731220445036.1731220445162 2024-11-10T06:34:05,166 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/WALs/4999977c7e1b,40861,1731220445036/4999977c7e1b%2C40861%2C1731220445036.1731220445162 2024-11-10T06:34:05,168 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42145:42145),(127.0.0.1/127.0.0.1:34841:34841)] 2024-11-10T06:34:05,171 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:34:05,171 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:34:05,171 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:34:05,171 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:34:05,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:34:05,174 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T06:34:05,174 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:34:05,175 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:34:05,175 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:34:05,176 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T06:34:05,176 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:34:05,176 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:34:05,177 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:34:05,178 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T06:34:05,178 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:34:05,178 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:34:05,178 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:34:05,179 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T06:34:05,179 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:34:05,180 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T06:34:05,180 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:34:05,180 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:34:05,181 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:34:05,181 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:34:05,181 DEBUG [master/4999977c7e1b:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:34:05,182 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T06:34:05,183 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T06:34:05,185 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:34:05,185 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=688491, jitterRate=-0.12453947961330414}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T06:34:05,185 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731220445171Initializing all the Stores at 1731220445172 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220445172Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220445172Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220445172Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220445172Cleaning up temporary data from old regions at 1731220445181 (+9 ms)Region opened successfully at 1731220445185 (+4 ms) 2024-11-10T06:34:05,186 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T06:34:05,188 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27e5495b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4999977c7e1b/172.17.0.2:0 2024-11-10T06:34:05,189 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T06:34:05,189 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T06:34:05,189 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T06:34:05,189 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T06:34:05,190 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-10T06:34:05,190 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-10T06:34:05,190 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T06:34:05,192 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T06:34:05,193 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T06:34:05,194 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T06:34:05,195 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T06:34:05,195 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T06:34:05,196 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T06:34:05,197 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T06:34:05,197 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T06:34:05,199 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T06:34:05,199 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T06:34:05,201 DEBUG 
[master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T06:34:05,203 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T06:34:05,206 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T06:34:05,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T06:34:05,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T06:34:05,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:05,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:05,208 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=4999977c7e1b,40861,1731220445036, sessionid=0x10190e30cc60000, setting cluster-up flag (Was=false) 2024-11-10T06:34:05,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:05,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:05,217 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T06:34:05,218 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4999977c7e1b,40861,1731220445036 2024-11-10T06:34:05,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:05,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:05,227 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T06:34:05,228 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4999977c7e1b,40861,1731220445036 2024-11-10T06:34:05,229 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T06:34:05,230 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T06:34:05,231 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T06:34:05,231 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-10T06:34:05,231 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 4999977c7e1b,40861,1731220445036 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T06:34:05,232 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:34:05,232 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:34:05,232 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:34:05,232 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/4999977c7e1b:0, corePoolSize=5, maxPoolSize=5 2024-11-10T06:34:05,233 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/4999977c7e1b:0, corePoolSize=10, maxPoolSize=10 2024-11-10T06:34:05,233 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:34:05,233 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/4999977c7e1b:0, corePoolSize=2, maxPoolSize=2 2024-11-10T06:34:05,233 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/4999977c7e1b:0, corePoolSize=1, 
maxPoolSize=1 2024-11-10T06:34:05,234 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731220475234 2024-11-10T06:34:05,234 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:34:05,235 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T06:34:05,235 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T06:34:05,235 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T06:34:05,235 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T06:34:05,235 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T06:34:05,235 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T06:34:05,235 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T06:34:05,235 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
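The compactions.CompactionConfiguration(183) lines repeated above (files [3,10), ratio 1.200000, off-peak ratio 5.000000, throttle point 2684354560, major period 604800000, jitter 0.500000) appear to be the stock defaults. A minimal sketch of how those values map onto the usual hbase-default.xml property names; the key names and the ShowCompactionDefaults class are illustrative assumptions, not something taken from this test's configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustration only: the defaults of these properties match the values
// printed by compactions.CompactionConfiguration(183) in the log above.
public class ShowCompactionDefaults {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);          // minFilesToCompact:3
    int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);         // maxFilesToCompact:10
    float ratio = conf.getFloat("hbase.hstore.compaction.ratio", 1.2F);    // ratio 1.200000
    float offPeak = conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F); // off-peak 5.000000
    long throttle = conf.getLong("hbase.regionserver.thread.compaction.throttle", 2684354560L); // 2.5 GB
    long majorPeriod = conf.getLong("hbase.hregion.majorcompaction", 604800000L); // 7 days
    float majorJitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5F); // 0.500000
    System.out.printf("files [%d,%d) ratio %.1f offPeak %.1f throttle %d major %d jitter %.2f%n",
        minFiles, maxFiles, ratio, offPeak, throttle, majorPeriod, majorJitter);
  }
}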
2024-11-10T06:34:05,235 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T06:34:05,235 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T06:34:05,235 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T06:34:05,236 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:34:05,236 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T06:34:05,236 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T06:34:05,236 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T06:34:05,240 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220445236,5,FailOnTimeoutGroup] 2024-11-10T06:34:05,240 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220445240,5,FailOnTimeoutGroup] 2024-11-10T06:34:05,240 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,240 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T06:34:05,240 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,240 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741831_1007 (size=1321) 2024-11-10T06:34:05,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741831_1007 (size=1321) 2024-11-10T06:34:05,245 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T06:34:05,246 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e 2024-11-10T06:34:05,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741832_1008 (size=32) 2024-11-10T06:34:05,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741832_1008 (size=32) 2024-11-10T06:34:05,252 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:34:05,253 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T06:34:05,254 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T06:34:05,254 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:34:05,254 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:34:05,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T06:34:05,256 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T06:34:05,256 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:34:05,256 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:34:05,256 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T06:34:05,257 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T06:34:05,257 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:34:05,258 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:34:05,258 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T06:34:05,259 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T06:34:05,259 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:34:05,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:34:05,259 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T06:34:05,260 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/data/hbase/meta/1588230740 2024-11-10T06:34:05,260 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/data/hbase/meta/1588230740 2024-11-10T06:34:05,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T06:34:05,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T06:34:05,261 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T06:34:05,263 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T06:34:05,264 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T06:34:05,265 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=829917, jitterRate=0.05529427528381348}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T06:34:05,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731220445252Initializing all the Stores at 1731220445252Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220445252Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220445253 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220445253Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220445253Cleaning up temporary data from old regions at 1731220445261 (+8 ms)Region opened successfully at 1731220445265 (+4 ms) 2024-11-10T06:34:05,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T06:34:05,265 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T06:34:05,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T06:34:05,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 1 ms 2024-11-10T06:34:05,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T06:34:05,266 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T06:34:05,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731220445265Disabling compacts and flushes for region at 
1731220445265Disabling writes for close at 1731220445266 (+1 ms)Writing region close event to WAL at 1731220445266Closed at 1731220445266 2024-11-10T06:34:05,267 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:34:05,267 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T06:34:05,267 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T06:34:05,269 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T06:34:05,269 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T06:34:05,301 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer(746): ClusterId : c663742e-44e5-46a0-80a1-d04cf4203e8c 2024-11-10T06:34:05,301 DEBUG [RS:0;4999977c7e1b:33787 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T06:34:05,304 DEBUG [RS:0;4999977c7e1b:33787 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T06:34:05,304 DEBUG [RS:0;4999977c7e1b:33787 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T06:34:05,306 DEBUG [RS:0;4999977c7e1b:33787 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T06:34:05,307 DEBUG [RS:0;4999977c7e1b:33787 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11fd3257, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4999977c7e1b/172.17.0.2:0 2024-11-10T06:34:05,319 DEBUG [RS:0;4999977c7e1b:33787 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;4999977c7e1b:33787 2024-11-10T06:34:05,319 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T06:34:05,319 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T06:34:05,319 DEBUG [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer(832): About to register with Master. 
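The FlushLargeStoresPolicy(65) messages above show the fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the table descriptor: the per-family lower bound is simply the region's memstore flush size divided by its number of column families. A small worked sketch using only numbers that appear in this log (the class and method names are made up for illustration):

// Fallback arithmetic behind the FlushLargeStoresPolicy(65) lines above:
// lower bound = memstore flush size / number of column families.
public class FlushLowerBoundMath {
  static long lowerBound(long memstoreFlushSize, int numFamilies) {
    return memstoreFlushSize / numFamilies;
  }

  public static void main(String[] args) {
    // master:store region: flushSize=134217728 with 4 families (info, proc, rs, state)
    System.out.println(lowerBound(134_217_728L, 4)); // 33554432, i.e. the logged "32.0 M"
    // hbase:meta region: logged lower bound 16777216 ("16.0 M") with 4 families
    // (info, ns, rep_barrier, table), implying a 64 MB flush size for that region.
    System.out.println(16_777_216L * 4);             // 67108864
  }
}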
2024-11-10T06:34:05,319 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer(2659): reportForDuty to master=4999977c7e1b,40861,1731220445036 with port=33787, startcode=1731220445083 2024-11-10T06:34:05,320 DEBUG [RS:0;4999977c7e1b:33787 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T06:34:05,322 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39355, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T06:34:05,322 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40861 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 4999977c7e1b,33787,1731220445083 2024-11-10T06:34:05,322 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40861 {}] master.ServerManager(517): Registering regionserver=4999977c7e1b,33787,1731220445083 2024-11-10T06:34:05,324 DEBUG [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e 2024-11-10T06:34:05,324 DEBUG [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46027 2024-11-10T06:34:05,324 DEBUG [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T06:34:05,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T06:34:05,326 DEBUG [RS:0;4999977c7e1b:33787 {}] zookeeper.ZKUtil(111): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4999977c7e1b,33787,1731220445083 2024-11-10T06:34:05,326 WARN [RS:0;4999977c7e1b:33787 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T06:34:05,326 INFO [RS:0;4999977c7e1b:33787 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:34:05,326 DEBUG [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/WALs/4999977c7e1b,33787,1731220445083 2024-11-10T06:34:05,326 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4999977c7e1b,33787,1731220445083] 2024-11-10T06:34:05,329 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T06:34:05,330 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T06:34:05,332 INFO [RS:0;4999977c7e1b:33787 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T06:34:05,332 INFO [RS:0;4999977c7e1b:33787 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
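The PressureAwareCompactionThroughputController(131) line above reports a 50 to 100 MB/second throughput band with a 60000 ms tuning period. To tie those numbers to configuration, the sketch below uses what I understand to be the corresponding property names (hbase.hstore.compaction.throughput.lower.bound, higher.bound, tune.period); treat the key names and the TuneBand class as assumptions rather than facts pulled from this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustration only: 52428800 and 104857600 bytes/s are the 50 MB/s and
// 100 MB/s bounds reported by the throughput controller above.
public class TuneBand {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long lower = conf.getLong("hbase.hstore.compaction.throughput.lower.bound", 52428800L);
    long higher = conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 104857600L);
    int tunePeriodMs = conf.getInt("hbase.hstore.compaction.throughput.tune.period", 60000);
    System.out.printf("compaction throughput: %d-%d bytes/s, tuned every %d ms%n",
        lower, higher, tunePeriodMs);
  }
}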
2024-11-10T06:34:05,333 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T06:34:05,333 INFO [RS:0;4999977c7e1b:33787 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T06:34:05,333 INFO [RS:0;4999977c7e1b:33787 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,333 DEBUG [RS:0;4999977c7e1b:33787 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:34:05,333 DEBUG [RS:0;4999977c7e1b:33787 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:34:05,333 DEBUG [RS:0;4999977c7e1b:33787 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:34:05,333 DEBUG [RS:0;4999977c7e1b:33787 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:34:05,333 DEBUG [RS:0;4999977c7e1b:33787 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:34:05,333 DEBUG [RS:0;4999977c7e1b:33787 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4999977c7e1b:0, corePoolSize=2, maxPoolSize=2 2024-11-10T06:34:05,333 DEBUG [RS:0;4999977c7e1b:33787 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:34:05,333 DEBUG [RS:0;4999977c7e1b:33787 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:34:05,333 DEBUG [RS:0;4999977c7e1b:33787 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:34:05,334 DEBUG [RS:0;4999977c7e1b:33787 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:34:05,334 DEBUG [RS:0;4999977c7e1b:33787 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:34:05,334 DEBUG [RS:0;4999977c7e1b:33787 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4999977c7e1b:0, corePoolSize=1, maxPoolSize=1 2024-11-10T06:34:05,334 DEBUG [RS:0;4999977c7e1b:33787 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:34:05,334 DEBUG [RS:0;4999977c7e1b:33787 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4999977c7e1b:0, corePoolSize=3, maxPoolSize=3 2024-11-10T06:34:05,334 INFO [RS:0;4999977c7e1b:33787 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-10T06:34:05,334 INFO [RS:0;4999977c7e1b:33787 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,334 INFO [RS:0;4999977c7e1b:33787 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,334 INFO [RS:0;4999977c7e1b:33787 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,334 INFO [RS:0;4999977c7e1b:33787 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,334 INFO [RS:0;4999977c7e1b:33787 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,33787,1731220445083-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T06:34:05,348 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T06:34:05,348 INFO [RS:0;4999977c7e1b:33787 {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,33787,1731220445083-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,349 INFO [RS:0;4999977c7e1b:33787 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,349 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.Replication(171): 4999977c7e1b,33787,1731220445083 started 2024-11-10T06:34:05,362 INFO [RS:0;4999977c7e1b:33787 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,362 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer(1482): Serving as 4999977c7e1b,33787,1731220445083, RpcServer on 4999977c7e1b/172.17.0.2:33787, sessionid=0x10190e30cc60001 2024-11-10T06:34:05,362 DEBUG [RS:0;4999977c7e1b:33787 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T06:34:05,362 DEBUG [RS:0;4999977c7e1b:33787 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4999977c7e1b,33787,1731220445083 2024-11-10T06:34:05,362 DEBUG [RS:0;4999977c7e1b:33787 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,33787,1731220445083' 2024-11-10T06:34:05,362 DEBUG [RS:0;4999977c7e1b:33787 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T06:34:05,363 DEBUG [RS:0;4999977c7e1b:33787 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T06:34:05,363 DEBUG [RS:0;4999977c7e1b:33787 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T06:34:05,363 DEBUG [RS:0;4999977c7e1b:33787 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T06:34:05,363 DEBUG [RS:0;4999977c7e1b:33787 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4999977c7e1b,33787,1731220445083 2024-11-10T06:34:05,363 DEBUG [RS:0;4999977c7e1b:33787 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4999977c7e1b,33787,1731220445083' 2024-11-10T06:34:05,363 DEBUG [RS:0;4999977c7e1b:33787 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T06:34:05,363 DEBUG 
[RS:0;4999977c7e1b:33787 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T06:34:05,363 DEBUG [RS:0;4999977c7e1b:33787 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T06:34:05,363 INFO [RS:0;4999977c7e1b:33787 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T06:34:05,363 INFO [RS:0;4999977c7e1b:33787 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T06:34:05,420 WARN [4999977c7e1b:40861 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-10T06:34:05,465 INFO [RS:0;4999977c7e1b:33787 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C33787%2C1731220445083, suffix=, logDir=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/WALs/4999977c7e1b,33787,1731220445083, archiveDir=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/oldWALs, maxLogs=32 2024-11-10T06:34:05,466 INFO [RS:0;4999977c7e1b:33787 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C33787%2C1731220445083.1731220445466 2024-11-10T06:34:05,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,45891,1731220254499/4999977c7e1b%2C45891%2C1731220254499.meta.1731220255324.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T06:34:05,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40625/user/jenkins/test-data/69c3652b-5c42-005a-9349-ccf648283edf/WALs/4999977c7e1b,42305,1731220255452/4999977c7e1b%2C42305%2C1731220255452.1731220255648 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T06:34:05,472 INFO [RS:0;4999977c7e1b:33787 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/WALs/4999977c7e1b,33787,1731220445083/4999977c7e1b%2C33787%2C1731220445083.1731220445466 2024-11-10T06:34:05,475 DEBUG [RS:0;4999977c7e1b:33787 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42145:42145),(127.0.0.1/127.0.0.1:34841:34841)] 2024-11-10T06:34:05,498 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T06:34:05,499 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-10T06:34:05,499 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-10T06:34:05,670 DEBUG [4999977c7e1b:40861 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-10T06:34:05,671 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=4999977c7e1b,33787,1731220445083 2024-11-10T06:34:05,672 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4999977c7e1b,33787,1731220445083, state=OPENING 2024-11-10T06:34:05,673 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T06:34:05,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:05,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:05,676 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T06:34:05,676 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:34:05,676 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:34:05,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=4999977c7e1b,33787,1731220445083}] 2024-11-10T06:34:05,829 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T06:34:05,831 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40497, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T06:34:05,834 INFO 
[RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T06:34:05,834 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:34:05,836 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4999977c7e1b%2C33787%2C1731220445083.meta, suffix=.meta, logDir=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/WALs/4999977c7e1b,33787,1731220445083, archiveDir=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/oldWALs, maxLogs=32 2024-11-10T06:34:05,837 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 4999977c7e1b%2C33787%2C1731220445083.meta.1731220445837.meta 2024-11-10T06:34:05,843 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/WALs/4999977c7e1b,33787,1731220445083/4999977c7e1b%2C33787%2C1731220445083.meta.1731220445837.meta 2024-11-10T06:34:05,848 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42145:42145),(127.0.0.1/127.0.0.1:34841:34841)] 2024-11-10T06:34:05,852 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T06:34:05,852 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T06:34:05,853 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T06:34:05,853 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
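For readers mapping the hbase:meta descriptor attributes echoed around these region opens (the MultiRowMutationEndpoint coprocessor, and an 'info' family with VERSIONS '3', BLOOMFILTER 'ROWCOL', DATA_BLOCK_ENCODING 'ROW_INDEX_V1', IN_MEMORY 'true', 8 KB blocks) back to the client API, here is a rough equivalent built with ColumnFamilyDescriptorBuilder and TableDescriptorBuilder. This is not the code HBase uses to create hbase:meta; the table name "demo" and the class name are placeholders:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Rough client-API equivalent of the 'info' family attributes logged for hbase:meta.
public class MetaLikeDescriptorSketch {
  public static void main(String[] args) throws Exception {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBlocksize(8192)                                    // BLOCKSIZE => '8192 B (8KB)'
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))                 // placeholder table, not hbase:meta
        .setColumnFamily(info)
        // the coprocessor$1 attribute in the log corresponds to a setCoprocessor(...) call
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
    System.out.println(td);
  }
}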
2024-11-10T06:34:05,853 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T06:34:05,853 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T06:34:05,853 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T06:34:05,853 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T06:34:05,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T06:34:05,854 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T06:34:05,855 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:34:05,855 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:34:05,855 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T06:34:05,855 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T06:34:05,856 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:34:05,856 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:34:05,856 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T06:34:05,856 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T06:34:05,857 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:34:05,857 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T06:34:05,857 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T06:34:05,857 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T06:34:05,857 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T06:34:05,858 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-10T06:34:05,858 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T06:34:05,858 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/data/hbase/meta/1588230740 2024-11-10T06:34:05,859 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/data/hbase/meta/1588230740 2024-11-10T06:34:05,860 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T06:34:05,860 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T06:34:05,861 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T06:34:05,862 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T06:34:05,862 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=836695, jitterRate=0.06391310691833496}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T06:34:05,862 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T06:34:05,863 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731220445853Writing region info on filesystem at 1731220445853Initializing all the Stores at 1731220445853Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220445854 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220445854Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731220445854Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731220445854Cleaning up temporary data from old regions at 1731220445860 (+6 ms)Running coprocessor post-open hooks at 1731220445862 (+2 ms)Region opened successfully at 1731220445863 (+1 ms) 2024-11-10T06:34:05,864 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731220445829 2024-11-10T06:34:05,866 DEBUG [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T06:34:05,866 INFO [RS_OPEN_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T06:34:05,867 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=4999977c7e1b,33787,1731220445083 2024-11-10T06:34:05,867 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4999977c7e1b,33787,1731220445083, state=OPEN 2024-11-10T06:34:05,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T06:34:05,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T06:34:05,872 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=4999977c7e1b,33787,1731220445083 2024-11-10T06:34:05,872 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:34:05,872 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T06:34:05,874 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T06:34:05,874 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=4999977c7e1b,33787,1731220445083 in 196 msec 2024-11-10T06:34:05,876 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T06:34:05,876 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 607 msec 2024-11-10T06:34:05,877 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T06:34:05,877 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T06:34:05,878 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T06:34:05,878 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4999977c7e1b,33787,1731220445083, seqNum=-1] 2024-11-10T06:34:05,878 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T06:34:05,879 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52581, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T06:34:05,883 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 653 msec 2024-11-10T06:34:05,884 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731220445884, completionTime=-1 2024-11-10T06:34:05,884 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-10T06:34:05,884 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-10T06:34:05,886 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-10T06:34:05,886 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731220505886 2024-11-10T06:34:05,886 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731220565886 2024-11-10T06:34:05,886 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-10T06:34:05,886 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,40861,1731220445036-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,886 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,40861,1731220445036-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,886 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,40861,1731220445036-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,886 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-4999977c7e1b:40861, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T06:34:05,886 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,887 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T06:34:05,888 DEBUG [master/4999977c7e1b:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T06:34:05,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.772sec 2024-11-10T06:34:05,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T06:34:05,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T06:34:05,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T06:34:05,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-10T06:34:05,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T06:34:05,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,40861,1731220445036-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T06:34:05,890 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,40861,1731220445036-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T06:34:05,892 DEBUG [master/4999977c7e1b:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T06:34:05,892 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T06:34:05,892 INFO [master/4999977c7e1b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4999977c7e1b,40861,1731220445036-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T06:34:05,901 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@606ca92f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:34:05,901 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 4999977c7e1b,40861,-1 for getting cluster id 2024-11-10T06:34:05,901 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T06:34:05,903 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c663742e-44e5-46a0-80a1-d04cf4203e8c' 2024-11-10T06:34:05,903 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T06:34:05,903 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c663742e-44e5-46a0-80a1-d04cf4203e8c" 2024-11-10T06:34:05,903 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62953ace, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:34:05,903 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [4999977c7e1b,40861,-1] 2024-11-10T06:34:05,903 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T06:34:05,904 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:34:05,905 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59708, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T06:34:05,905 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43dd61fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T06:34:05,906 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T06:34:05,906 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=4999977c7e1b,33787,1731220445083, seqNum=-1] 2024-11-10T06:34:05,907 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T06:34:05,907 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34906, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T06:34:05,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=4999977c7e1b,40861,1731220445036 2024-11-10T06:34:05,909 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T06:34:05,911 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-10T06:34:05,911 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T06:34:05,913 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/WALs/test.com,8080,1, archiveDir=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/oldWALs, maxLogs=32 2024-11-10T06:34:05,913 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731220445913 2024-11-10T06:34:05,918 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/WALs/test.com,8080,1/test.com%2C8080%2C1.1731220445913 2024-11-10T06:34:05,919 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34841:34841),(127.0.0.1/127.0.0.1:42145:42145)] 2024-11-10T06:34:05,920 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731220445919 2024-11-10T06:34:05,925 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:05,926 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:05,926 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:05,926 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:05,926 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:05,926 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/WALs/test.com,8080,1/test.com%2C8080%2C1.1731220445913 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/WALs/test.com,8080,1/test.com%2C8080%2C1.1731220445919 2024-11-10T06:34:05,928 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34841:34841),(127.0.0.1/127.0.0.1:42145:42145)] 2024-11-10T06:34:05,928 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/WALs/test.com,8080,1/test.com%2C8080%2C1.1731220445913 is not closed yet, will try archiving it next time 2024-11-10T06:34:05,929 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:05,929 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:05,929 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:05,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741835_1011 (size=93) 2024-11-10T06:34:05,929 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:05,929 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:05,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741835_1011 (size=93) 2024-11-10T06:34:05,930 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/WALs/test.com,8080,1/test.com%2C8080%2C1.1731220445913 to hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/oldWALs/test.com%2C8080%2C1.1731220445913 2024-11-10T06:34:05,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741836_1012 (size=93) 2024-11-10T06:34:05,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741836_1012 (size=93) 2024-11-10T06:34:05,933 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/oldWALs 2024-11-10T06:34:05,933 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731220445919) 2024-11-10T06:34:05,933 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T06:34:05,933 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T06:34:05,933 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:34:05,933 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:34:05,933 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:34:05,933 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-10T06:34:05,933 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T06:34:05,933 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=578312045, stopped=false 2024-11-10T06:34:05,933 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=4999977c7e1b,40861,1731220445036 2024-11-10T06:34:05,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T06:34:05,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T06:34:05,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:05,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T06:34:05,935 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T06:34:05,936 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-10T06:34:05,936 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:34:05,936 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:34:05,936 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '4999977c7e1b,33787,1731220445083' ***** 2024-11-10T06:34:05,936 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:34:05,936 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T06:34:05,936 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T06:34:05,936 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T06:34:05,936 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T06:34:05,937 INFO [RS:0;4999977c7e1b:33787 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T06:34:05,937 INFO [RS:0;4999977c7e1b:33787 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T06:34:05,937 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer(959): stopping server 4999977c7e1b,33787,1731220445083 2024-11-10T06:34:05,937 INFO [RS:0;4999977c7e1b:33787 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T06:34:05,937 INFO [RS:0;4999977c7e1b:33787 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;4999977c7e1b:33787. 2024-11-10T06:34:05,937 DEBUG [RS:0;4999977c7e1b:33787 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T06:34:05,937 DEBUG [RS:0;4999977c7e1b:33787 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:34:05,937 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T06:34:05,937 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T06:34:05,937 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-10T06:34:05,937 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T06:34:05,937 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T06:34:05,937 DEBUG [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-10T06:34:05,937 DEBUG [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-10T06:34:05,937 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T06:34:05,938 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T06:34:05,938 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T06:34:05,938 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T06:34:05,938 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T06:34:05,938 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-10T06:34:05,953 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/data/hbase/meta/1588230740/.tmp/ns/43f536f9e08840dca3fb99d1b661852e is 43, key is default/ns:d/1731220445880/Put/seqid=0 2024-11-10T06:34:05,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741837_1013 (size=5153) 2024-11-10T06:34:05,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741837_1013 (size=5153) 2024-11-10T06:34:05,958 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/data/hbase/meta/1588230740/.tmp/ns/43f536f9e08840dca3fb99d1b661852e 2024-11-10T06:34:05,963 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/data/hbase/meta/1588230740/.tmp/ns/43f536f9e08840dca3fb99d1b661852e as hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/data/hbase/meta/1588230740/ns/43f536f9e08840dca3fb99d1b661852e 2024-11-10T06:34:05,967 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/data/hbase/meta/1588230740/ns/43f536f9e08840dca3fb99d1b661852e, entries=2, sequenceid=6, filesize=5.0 K 2024-11-10T06:34:05,968 INFO 
[RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false 2024-11-10T06:34:05,971 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-10T06:34:05,972 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T06:34:05,972 INFO [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T06:34:05,972 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731220445937Running coprocessor pre-close hooks at 1731220445937Disabling compacts and flushes for region at 1731220445937Disabling writes for close at 1731220445938 (+1 ms)Obtaining lock to block concurrent updates at 1731220445938Preparing flush snapshotting stores in 1588230740 at 1731220445938Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731220445938Flushing stores of hbase:meta,,1.1588230740 at 1731220445938Flushing 1588230740/ns: creating writer at 1731220445939 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731220445953 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731220445953Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a1f2182: reopening flushed file at 1731220445962 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false at 1731220445968 (+6 ms)Writing region close event to WAL at 1731220445968Running coprocessor post-close hooks at 1731220445972 (+4 ms)Closed at 1731220445972 2024-11-10T06:34:05,972 DEBUG [RS_CLOSE_META-regionserver/4999977c7e1b:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T06:34:06,138 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer(976): stopping server 4999977c7e1b,33787,1731220445083; all regions closed. 
2024-11-10T06:34:06,138 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:06,138 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:06,138 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:06,138 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:06,138 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:06,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741834_1010 (size=1152) 2024-11-10T06:34:06,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741834_1010 (size=1152) 2024-11-10T06:34:06,143 DEBUG [RS:0;4999977c7e1b:33787 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/oldWALs 2024-11-10T06:34:06,143 INFO [RS:0;4999977c7e1b:33787 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4999977c7e1b%2C33787%2C1731220445083.meta:.meta(num 1731220445837) 2024-11-10T06:34:06,143 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:06,143 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:06,143 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:06,143 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:06,144 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T06:34:06,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741833_1009 (size=93) 2024-11-10T06:34:06,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741833_1009 (size=93) 2024-11-10T06:34:06,147 DEBUG [RS:0;4999977c7e1b:33787 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/oldWALs 2024-11-10T06:34:06,147 INFO [RS:0;4999977c7e1b:33787 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 4999977c7e1b%2C33787%2C1731220445083:(num 1731220445466) 2024-11-10T06:34:06,147 DEBUG [RS:0;4999977c7e1b:33787 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T06:34:06,147 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T06:34:06,147 INFO [RS:0;4999977c7e1b:33787 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T06:34:06,147 INFO [RS:0;4999977c7e1b:33787 {}] hbase.ChoreService(370): Chore service for: regionserver/4999977c7e1b:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T06:34:06,147 INFO [RS:0;4999977c7e1b:33787 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T06:34:06,148 INFO [regionserver/4999977c7e1b:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T06:34:06,148 INFO [RS:0;4999977c7e1b:33787 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33787 2024-11-10T06:34:06,151 INFO [RS:0;4999977c7e1b:33787 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T06:34:06,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T06:34:06,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4999977c7e1b,33787,1731220445083 2024-11-10T06:34:06,153 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4999977c7e1b,33787,1731220445083] 2024-11-10T06:34:06,154 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/4999977c7e1b,33787,1731220445083 already deleted, retry=false 2024-11-10T06:34:06,154 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 4999977c7e1b,33787,1731220445083 expired; onlineServers=0 2024-11-10T06:34:06,154 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '4999977c7e1b,40861,1731220445036' ***** 2024-11-10T06:34:06,154 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T06:34:06,154 INFO [M:0;4999977c7e1b:40861 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T06:34:06,154 INFO [M:0;4999977c7e1b:40861 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T06:34:06,154 DEBUG [M:0;4999977c7e1b:40861 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T06:34:06,155 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-10T06:34:06,155 DEBUG [M:0;4999977c7e1b:40861 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-10T06:34:06,155 DEBUG [master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220445240 {}] cleaner.HFileCleaner(306): Exit Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.small.0-1731220445240,5,FailOnTimeoutGroup]
2024-11-10T06:34:06,155 DEBUG [master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220445236 {}] cleaner.HFileCleaner(306): Exit Thread[master/4999977c7e1b:0:becomeActiveMaster-HFileCleaner.large.0-1731220445236,5,FailOnTimeoutGroup]
2024-11-10T06:34:06,155 INFO [M:0;4999977c7e1b:40861 {}] hbase.ChoreService(370): Chore service for: master/4999977c7e1b:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-10T06:34:06,155 INFO [M:0;4999977c7e1b:40861 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-10T06:34:06,155 DEBUG [M:0;4999977c7e1b:40861 {}] master.HMaster(1795): Stopping service threads
2024-11-10T06:34:06,155 INFO [M:0;4999977c7e1b:40861 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-10T06:34:06,155 INFO [M:0;4999977c7e1b:40861 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-10T06:34:06,155 INFO [M:0;4999977c7e1b:40861 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-10T06:34:06,155 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-10T06:34:06,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-10T06:34:06,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T06:34:06,156 DEBUG [M:0;4999977c7e1b:40861 {}] zookeeper.ZKUtil(347): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-10T06:34:06,156 WARN [M:0;4999977c7e1b:40861 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-10T06:34:06,156 INFO [M:0;4999977c7e1b:40861 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/.lastflushedseqids
2024-11-10T06:34:06,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741838_1014 (size=99)
2024-11-10T06:34:06,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741838_1014 (size=99)
2024-11-10T06:34:06,165 INFO [M:0;4999977c7e1b:40861 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-10T06:34:06,165 INFO [M:0;4999977c7e1b:40861 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-10T06:34:06,165 DEBUG [M:0;4999977c7e1b:40861 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-10T06:34:06,165 INFO [M:0;4999977c7e1b:40861 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-10T06:34:06,165 DEBUG [M:0;4999977c7e1b:40861 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-10T06:34:06,165 DEBUG [M:0;4999977c7e1b:40861 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-10T06:34:06,165 DEBUG [M:0;4999977c7e1b:40861 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-10T06:34:06,165 INFO [M:0;4999977c7e1b:40861 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB
2024-11-10T06:34:06,181 DEBUG [M:0;4999977c7e1b:40861 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/14e76aedb5c947c99affc56584229576 is 82, key is hbase:meta,,1/info:regioninfo/1731220445866/Put/seqid=0
2024-11-10T06:34:06,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741839_1015 (size=5672)
2024-11-10T06:34:06,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741839_1015 (size=5672)
2024-11-10T06:34:06,185 INFO [M:0;4999977c7e1b:40861 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/14e76aedb5c947c99affc56584229576
2024-11-10T06:34:06,204 DEBUG [M:0;4999977c7e1b:40861 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/36754cea39d14351962c64c8bc966ff0 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731220445883/Put/seqid=0
2024-11-10T06:34:06,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741840_1016 (size=5275)
2024-11-10T06:34:06,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741840_1016 (size=5275)
2024-11-10T06:34:06,208 INFO [M:0;4999977c7e1b:40861 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/36754cea39d14351962c64c8bc966ff0
2024-11-10T06:34:06,226 DEBUG [M:0;4999977c7e1b:40861 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4c6f6191ebec490682c6bd2f08c9de8a is 69, key is 4999977c7e1b,33787,1731220445083/rs:state/1731220445322/Put/seqid=0
2024-11-10T06:34:06,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741841_1017 (size=5156)
2024-11-10T06:34:06,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741841_1017 (size=5156)
2024-11-10T06:34:06,231 INFO [M:0;4999977c7e1b:40861 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4c6f6191ebec490682c6bd2f08c9de8a
2024-11-10T06:34:06,249 DEBUG [M:0;4999977c7e1b:40861 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/17a34e2744a84a069e7c34d4ed6fe5b0 is 52, key is load_balancer_on/state:d/1731220445910/Put/seqid=0
2024-11-10T06:34:06,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-10T06:34:06,253 INFO [RS:0;4999977c7e1b:33787 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-10T06:34:06,253 INFO [RS:0;4999977c7e1b:33787 {}] regionserver.HRegionServer(1031): Exiting; stopping=4999977c7e1b,33787,1731220445083; zookeeper connection closed.
2024-11-10T06:34:06,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33787-0x10190e30cc60001, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-10T06:34:06,253 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@59afa34a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@59afa34a
2024-11-10T06:34:06,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741842_1018 (size=5056)
2024-11-10T06:34:06,254 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-10T06:34:06,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741842_1018 (size=5056)
2024-11-10T06:34:06,254 INFO [M:0;4999977c7e1b:40861 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/17a34e2744a84a069e7c34d4ed6fe5b0
2024-11-10T06:34:06,259 DEBUG [M:0;4999977c7e1b:40861 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/14e76aedb5c947c99affc56584229576 as hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/14e76aedb5c947c99affc56584229576
2024-11-10T06:34:06,263 INFO [M:0;4999977c7e1b:40861 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/14e76aedb5c947c99affc56584229576, entries=8, sequenceid=29, filesize=5.5 K
2024-11-10T06:34:06,263 DEBUG [M:0;4999977c7e1b:40861 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/36754cea39d14351962c64c8bc966ff0 as hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/36754cea39d14351962c64c8bc966ff0
2024-11-10T06:34:06,267 INFO [M:0;4999977c7e1b:40861 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/36754cea39d14351962c64c8bc966ff0, entries=3, sequenceid=29, filesize=5.2 K
2024-11-10T06:34:06,268 DEBUG [M:0;4999977c7e1b:40861 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4c6f6191ebec490682c6bd2f08c9de8a as hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4c6f6191ebec490682c6bd2f08c9de8a
2024-11-10T06:34:06,271 INFO [M:0;4999977c7e1b:40861 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4c6f6191ebec490682c6bd2f08c9de8a, entries=1, sequenceid=29, filesize=5.0 K
2024-11-10T06:34:06,272 DEBUG [M:0;4999977c7e1b:40861 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/17a34e2744a84a069e7c34d4ed6fe5b0 as hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/17a34e2744a84a069e7c34d4ed6fe5b0
2024-11-10T06:34:06,276 INFO [M:0;4999977c7e1b:40861 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46027/user/jenkins/test-data/39b8ce56-44aa-dfcb-5abc-59534a419e0e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/17a34e2744a84a069e7c34d4ed6fe5b0, entries=1, sequenceid=29, filesize=4.9 K
2024-11-10T06:34:06,277 INFO [M:0;4999977c7e1b:40861 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=29, compaction requested=false
2024-11-10T06:34:06,278 INFO [M:0;4999977c7e1b:40861 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-10T06:34:06,278 DEBUG [M:0;4999977c7e1b:40861 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731220446165Disabling compacts and flushes for region at 1731220446165Disabling writes for close at 1731220446165Obtaining lock to block concurrent updates at 1731220446165Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731220446165Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731220446166 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731220446166Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731220446166Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731220446180 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731220446180Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731220446189 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731220446203 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731220446203Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731220446212 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731220446226 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731220446226Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731220446235 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731220446249 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731220446249Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1378d8a: reopening flushed file at 1731220446258 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6dc1db47: reopening flushed file at 1731220446263 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e5d5764: reopening flushed file at 1731220446267 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@708d5842: reopening flushed file at 1731220446271 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=29, compaction requested=false at 1731220446277 (+6 ms)Writing region close event to WAL at 1731220446278 (+1 ms)Closed at 1731220446278
2024-11-10T06:34:06,279 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T06:34:06,279 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T06:34:06,279 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T06:34:06,279 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T06:34:06,279 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T06:34:06,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44529 is added to blk_1073741830_1006 (size=10311)
2024-11-10T06:34:06,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37173 is added to blk_1073741830_1006 (size=10311)
2024-11-10T06:34:06,282 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-10T06:34:06,282 INFO [M:0;4999977c7e1b:40861 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-10T06:34:06,282 INFO [M:0;4999977c7e1b:40861 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40861
2024-11-10T06:34:06,282 INFO [M:0;4999977c7e1b:40861 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-10T06:34:06,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-10T06:34:06,384 INFO [M:0;4999977c7e1b:40861 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-10T06:34:06,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40861-0x10190e30cc60000, quorum=127.0.0.1:49457, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-10T06:34:06,387 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@36271130{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-10T06:34:06,387 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f8244c4{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-10T06:34:06,387 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-10T06:34:06,387 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@787f2444{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-10T06:34:06,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4519d7f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/hadoop.log.dir/,STOPPED}
2024-11-10T06:34:06,389 WARN [BP-260444421-172.17.0.2-1731220444401 heartbeating to localhost/127.0.0.1:46027 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-10T06:34:06,389 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-10T06:34:06,389 WARN [BP-260444421-172.17.0.2-1731220444401 heartbeating to localhost/127.0.0.1:46027 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-260444421-172.17.0.2-1731220444401 (Datanode Uuid 10d06763-585f-4d42-9665-1a9078eeefd8) service to localhost/127.0.0.1:46027
2024-11-10T06:34:06,389 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-10T06:34:06,389 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/cluster_55264451-bed9-b2e4-0fb1-ec85e39516d9/data/data3/current/BP-260444421-172.17.0.2-1731220444401 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T06:34:06,390 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/cluster_55264451-bed9-b2e4-0fb1-ec85e39516d9/data/data4/current/BP-260444421-172.17.0.2-1731220444401 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T06:34:06,390 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-10T06:34:06,392 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@280c54a4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-10T06:34:06,392 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fba4655{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-10T06:34:06,392 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-10T06:34:06,392 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@525207a7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-10T06:34:06,392 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5d1dcdb8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/hadoop.log.dir/,STOPPED}
2024-11-10T06:34:06,393 WARN [BP-260444421-172.17.0.2-1731220444401 heartbeating to localhost/127.0.0.1:46027 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-10T06:34:06,393 WARN [BP-260444421-172.17.0.2-1731220444401 heartbeating to localhost/127.0.0.1:46027 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-260444421-172.17.0.2-1731220444401 (Datanode Uuid 7535dde5-f1e3-44af-8787-1490bfa5ef54) service to localhost/127.0.0.1:46027
2024-11-10T06:34:06,393 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-10T06:34:06,393 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-10T06:34:06,394 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/cluster_55264451-bed9-b2e4-0fb1-ec85e39516d9/data/data1/current/BP-260444421-172.17.0.2-1731220444401 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T06:34:06,394 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/cluster_55264451-bed9-b2e4-0fb1-ec85e39516d9/data/data2/current/BP-260444421-172.17.0.2-1731220444401 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T06:34:06,394 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-10T06:34:06,399 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10602cad{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-10T06:34:06,400 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3ae4e29{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-10T06:34:06,400 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-10T06:34:06,400 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3364e6da{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-10T06:34:06,400 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10471e56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/8d7847d6-0a65-6584-4a16-3f2f79cabc07/hadoop.log.dir/,STOPPED}
2024-11-10T06:34:06,406 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-10T06:34:06,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-10T06:34:06,430 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 230)
Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:46027 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:46027 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:46027 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46027 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46027 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:46027 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:46027 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: globalEventExecutor-1-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46027 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=537 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=62 (was 15) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6943 (was 6948)